Diffstat (limited to 'chromium/base')
599 files changed, 28238 insertions(+), 3674 deletions(-)
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn index 8df68ae8930..4a9258a9ee7 100644 --- a/chromium/base/BUILD.gn +++ b/chromium/base/BUILD.gn @@ -18,6 +18,7 @@ # huge sequence of random-looking conditionals. import("//base/allocator/allocator.gni") +import("//base/trace_event/features.gni") import("//build/buildflag_header.gni") import("//build/config/allocator.gni") import("//build/config/arm.gni") @@ -195,6 +196,10 @@ jumbo_component("base") { "callback_internal.h", "callback_list.h", "cancelable_callback.h", + "check.cc", + "check.h", + "check_op.cc", + "check_op.h", "command_line.cc", "command_line.h", "compiler_specific.h", @@ -316,6 +321,7 @@ jumbo_component("base") { "macros.h", "memory/aligned_memory.cc", "memory/aligned_memory.h", + "memory/checked_ptr.h", "memory/discardable_memory.cc", "memory/discardable_memory.h", "memory/discardable_memory_allocator.cc", @@ -426,6 +432,8 @@ jumbo_component("base") { "native_library.cc", "native_library.h", "no_destructor.h", + "notreached.cc", + "notreached.h", "observer_list.h", "observer_list_internal.cc", "observer_list_internal.h", @@ -478,7 +486,6 @@ jumbo_component("base") { "profiler/module_cache.cc", "profiler/module_cache.h", "profiler/native_unwinder.h", - "profiler/profile_builder.cc", "profiler/profile_builder.h", "profiler/register_context.h", "profiler/sample_metadata.cc", @@ -956,6 +963,8 @@ jumbo_component("base") { "files/file_path_watcher_win.cc", "files/file_util_win.cc", "files/file_win.cc", + "files/important_file_writer_cleaner.cc", + "files/important_file_writer_cleaner.h", "files/memory_mapped_file_win.cc", "logging_win.cc", "logging_win.h", @@ -1196,6 +1205,7 @@ jumbo_component("base") { "native_library_ios.mm", "process/launch_ios.cc", "process/process_metrics_ios.cc", + "profiler/module_cache_mac.cc", "profiler/stack_sampler_ios.cc", ] } @@ -1209,8 +1219,6 @@ jumbo_component("base") { "message_loop/message_pump_android.h", "os_compat_android.cc", "os_compat_android.h", - "profiler/native_unwinder_android.cc", - "profiler/native_unwinder_android.h", "profiler/stack_sampler_android.cc", "threading/platform_thread_android.cc", "trace_event/cpufreq_monitor_android.cc", @@ -1355,7 +1363,7 @@ jumbo_component("base") { "allocator/allocator_shim_override_linker_wrapped_symbols.h", ] all_dependent_configs += [ "//base/allocator:wrap_malloc_symbols" ] - } else if (is_mac) { + } else if (is_mac || is_ios) { sources += [ "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc", "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.h", @@ -1390,7 +1398,6 @@ jumbo_component("base") { "android/android_image_reader_abi.h", "android/android_image_reader_compat.cc", "android/android_image_reader_compat.h", - "android/animation_frame_time_histogram.cc", "android/apk_assets.cc", "android/apk_assets.h", "android/application_status_listener.cc", @@ -1413,6 +1420,7 @@ jumbo_component("base") { "android/early_trace_event_binding.h", "android/event_log.cc", "android/event_log.h", + "android/feature_list_jni.cc", "android/field_trial_list.cc", "android/important_file_writer_android.cc", "android/int_string_callback.cc", @@ -1477,6 +1485,7 @@ jumbo_component("base") { "android/timezone_utils.cc", "android/timezone_utils.h", "android/trace_event_binding.cc", + "android/trace_event_binding.h", "android/unguessable_token_android.cc", "android/unguessable_token_android.h", "memory/platform_shared_memory_region_android.cc", @@ -1770,6 +1779,7 @@ jumbo_component("base") { 
"allocator/partition_allocator/partition_alloc.cc", "allocator/partition_allocator/partition_alloc.h", "allocator/partition_allocator/partition_alloc_constants.h", + "allocator/partition_allocator/partition_alloc_forward.h", "allocator/partition_allocator/partition_bucket.cc", "allocator/partition_allocator/partition_bucket.h", "allocator/partition_allocator/partition_cookie.h", @@ -1961,10 +1971,6 @@ jumbo_component("base") { "process/process_iterator.h", "process/process_metrics_posix.cc", "process/process_posix.cc", - "sampling_heap_profiler/poisson_allocation_sampler.cc", - "sampling_heap_profiler/poisson_allocation_sampler.h", - "sampling_heap_profiler/sampling_heap_profiler.cc", - "sampling_heap_profiler/sampling_heap_profiler.h", "sync_socket.h", "sync_socket_posix.cc", "synchronization/waitable_event_watcher.h", @@ -2002,7 +2008,6 @@ jumbo_component("base") { "message_loop/message_pump_mac.mm", "power_monitor/power_monitor_device_source_ios.mm", "process/memory_stubs.cc", - "profiler/module_cache_stub.cc", "strings/sys_string_conversions_mac.mm", "synchronization/waitable_event_mac.cc", "system/sys_info_ios.mm", @@ -2013,6 +2018,22 @@ jumbo_component("base") { "time/time_mac.cc", ] + if (use_allocator_shim) { + sources += [ + "allocator/allocator_interception_mac.h", + "allocator/allocator_interception_mac.mm", + "allocator/malloc_zone_functions_mac.cc", + "allocator/malloc_zone_functions_mac.h", + ] + } else { + sources -= [ + "sampling_heap_profiler/poisson_allocation_sampler.cc", + "sampling_heap_profiler/poisson_allocation_sampler.h", + "sampling_heap_profiler/sampling_heap_profiler.cc", + "sampling_heap_profiler/sampling_heap_profiler.h", + ] + } + if (current_cpu == "x64" || current_cpu == "arm64") { sources += [ "time/time_exploded_posix.cc" ] } else { @@ -2176,6 +2197,7 @@ buildflag_header("clang_profiling_buildflags") { flags = [ "CLANG_PROFILING=$use_clang_profiling", "CLANG_PROFILING_INSIDE_SANDBOX=$use_clang_profiling_inside_sandbox", + "USE_CLANG_COVERAGE=$use_clang_coverage", ] } @@ -2291,6 +2313,7 @@ test("base_perftests") { "message_loop/message_pump_perftest.cc", "observer_list_perftest.cc", "strings/string_util_perftest.cc", + "task/job_perftest.cc", "task/sequence_manager/sequence_manager_perftest.cc", "task/thread_pool/thread_pool_perftest.cc", "threading/thread_local_storage_perftest.cc", @@ -2353,6 +2376,11 @@ if (!is_ios) { "//build/win:default_exe_manifest", ] } + + executable("json_perftest_decodebench") { + sources = [ "json/json_perftest_decodebench.cc" ] + deps = [ ":base" ] + } } if (is_win) { @@ -2380,13 +2408,29 @@ if (is_win) { } } -if (is_win || is_mac) { - if (current_cpu == "x64" || (current_cpu == "arm64" && is_win)) { - # Must be a shared library so that it can be unloaded during testing. - loadable_module("base_profiler_test_support_library") { - testonly = true - sources = [ "profiler/test_support_library.cc" ] - } +if ((is_win && (current_cpu == "x64" || current_cpu == "arm64")) || is_mac || + (is_android && (current_cpu == "arm" || current_cpu == "arm64"))) { + # Must be a loadable module so that it can be loaded/unloaded at runtime + # during testing. 
+ loadable_module("base_profiler_test_support_library") { + testonly = true + sources = [ "profiler/test_support_library.cc" ] + } +} + +if (is_android && (current_cpu == "arm" || current_cpu == "arm64")) { + source_set("native_unwinder_android") { + sources = [ + "profiler/native_unwinder_android.cc", + "profiler/native_unwinder_android.h", + "profiler/unwindstack_internal_android.cc", + "profiler/unwindstack_internal_android.h", + ] + + include_dirs = [ "//third_party/libunwindstack/src/libunwindstack/include" ] + + public_deps = [ ":base" ] + deps = [ "//third_party/libunwindstack" ] } } @@ -2492,6 +2536,7 @@ test("base_unittests") { "callback_list_unittest.cc", "callback_unittest.cc", "cancelable_callback_unittest.cc", + "check_unittest.cc", "command_line_unittest.cc", "component_export_unittest.cc", "containers/adapters_unittest.cc", @@ -2564,6 +2609,7 @@ test("base_unittests") { "location_unittest.cc", "logging_unittest.cc", "memory/aligned_memory_unittest.cc", + "memory/checked_ptr_unittest.cc", "memory/discardable_memory_backing_field_trial_unittest.cc", "memory/discardable_shared_memory_unittest.cc", "memory/memory_pressure_listener_unittest.cc", @@ -2705,6 +2751,7 @@ test("base_unittests") { "task/thread_pool/tracked_ref_unittest.cc", "task/thread_pool/worker_thread_stack_unittest.cc", "task/thread_pool/worker_thread_unittest.cc", + "task/thread_pool_unittest.cc", "task_runner_unittest.cc", "task_runner_util_unittest.cc", "template_util_unittest.cc", @@ -2817,6 +2864,7 @@ test("base_unittests") { sources += [ "debug/gdi_debug_util_win_unittest.cc", "file_version_info_win_unittest.cc", + "files/important_file_writer_cleaner_unittest.cc", "process/launch_unittest_win.cc", "test/test_reg_util_win_unittest.cc", "threading/platform_thread_win_unittest.cc", @@ -2927,6 +2975,18 @@ test("base_unittests") { if (current_cpu == "arm") { sources += [ "profiler/chrome_unwinder_android_unittest.cc" ] } + if (!exclude_unwind_tables && + (current_cpu == "arm" || current_cpu == "arm64")) { + sources += [ "profiler/native_unwinder_android_unittest.cc" ] + include_dirs = + [ "//third_party/libunwindstack/src/libunwindstack/include" ] + deps += [ + ":base_profiler_test_support_java", + ":base_profiler_test_support_jni_headers", + ":base_profiler_test_support_library", + ":native_unwinder_android", + ] + } sources += [ "android/android_image_reader_compat_unittest.cc", @@ -2998,6 +3058,13 @@ test("base_unittests") { "mac/scoped_nsobject_unittest.mm", "strings/sys_string_conversions_mac_unittest.mm", ] + + if (use_allocator_shim) { + sources += [ + "allocator/allocator_interception_mac_unittest.mm", + "allocator/malloc_zone_functions_mac_unittest.cc", + ] + } } if (use_partition_alloc) { @@ -3148,6 +3215,7 @@ if (enable_nocompile_tests) { "containers/buffer_iterator_unittest.nc", "containers/checked_iterators_unittest.nc", "containers/span_unittest.nc", + "memory/checked_ptr_unittest.nc", "memory/ref_counted_unittest.nc", "memory/weak_ptr_unittest.nc", "metrics/field_trial_params_unittest.nc", @@ -3174,7 +3242,6 @@ if (enable_nocompile_tests) { if (is_android) { generate_jni("base_jni_headers") { sources = [ - "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java", "android/java/src/org/chromium/base/ApkAssets.java", "android/java/src/org/chromium/base/ApplicationStatus.java", "android/java/src/org/chromium/base/BuildInfo.java", @@ -3185,6 +3252,7 @@ if (is_android) { "android/java/src/org/chromium/base/CpuFeatures.java", "android/java/src/org/chromium/base/EarlyTraceEvent.java", 
"android/java/src/org/chromium/base/EventLog.java", + "android/java/src/org/chromium/base/FeatureList.java", "android/java/src/org/chromium/base/FieldTrialList.java", "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java", "android/java/src/org/chromium/base/IntStringCallback.java", @@ -3252,12 +3320,11 @@ if (is_android) { "//third_party/android_deps:androidx_collection_collection_java", "//third_party/android_deps:androidx_core_core_java", "//third_party/android_deps:androidx_multidex_multidex_java", - "//third_party/jsr-305:jsr_305_javalib", + "//third_party/android_deps:com_google_code_findbugs_jsr305_java", ] sources = [ "android/java/src/org/chromium/base/ActivityState.java", - "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java", "android/java/src/org/chromium/base/ApiCompatibilityUtils.java", "android/java/src/org/chromium/base/ApkAssets.java", "android/java/src/org/chromium/base/ApplicationStatus.java", @@ -3265,6 +3332,7 @@ if (is_android) { "android/java/src/org/chromium/base/BuildInfo.java", "android/java/src/org/chromium/base/BundleUtils.java", "android/java/src/org/chromium/base/Callback.java", + "android/java/src/org/chromium/base/CallbackController.java", "android/java/src/org/chromium/base/CollectionUtil.java", "android/java/src/org/chromium/base/CommandLine.java", "android/java/src/org/chromium/base/CommandLineInitUtil.java", @@ -3373,6 +3441,7 @@ if (is_android) { "android/java/src/org/chromium/base/supplier/DestroyableObservableSupplier.java", "android/java/src/org/chromium/base/supplier/ObservableSupplier.java", "android/java/src/org/chromium/base/supplier/ObservableSupplierImpl.java", + "android/java/src/org/chromium/base/supplier/OneShotCallback.java", "android/java/src/org/chromium/base/supplier/Supplier.java", "android/java/src/org/chromium/base/task/AsyncTask.java", "android/java/src/org/chromium/base/task/BackgroundOnlyAsyncTask.java", @@ -3498,7 +3567,6 @@ if (is_android) { "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java", "test/android/javatests/src/org/chromium/base/test/BundleTestRule.java", "test/android/javatests/src/org/chromium/base/test/DestroyActivitiesRule.java", - "test/android/javatests/src/org/chromium/base/test/DisableNativeTestRule.java", "test/android/javatests/src/org/chromium/base/test/LifetimeAssertRule.java", "test/android/javatests/src/org/chromium/base/test/LoadNative.java", "test/android/javatests/src/org/chromium/base/test/ReachedCodeProfiler.java", @@ -3569,13 +3637,12 @@ if (is_android) { } android_library("base_junit_test_support") { - # Platform checks are broken for Robolectric. + # Platform checks are broken for Robolectric. See https://crbug.com/1071638. 
bypass_platform_checks = true testonly = true sources = [ "//third_party/robolectric/custom_asynctask/java/src/org/chromium/base/task/test/ShadowAsyncTask.java", "//third_party/robolectric/custom_asynctask/java/src/org/chromium/base/task/test/ShadowAsyncTaskBridge.java", - "android/junit/src/org/chromium/base/metrics/test/DisableHistogramsRule.java", "android/junit/src/org/chromium/base/metrics/test/ShadowRecordHistogram.java", "test/android/junit/src/org/chromium/base/task/test/BackgroundShadowAsyncTask.java", "test/android/junit/src/org/chromium/base/task/test/CustomShadowAsyncTask.java", @@ -3598,8 +3665,8 @@ if (is_android) { junit_binary("base_junit_tests") { skip_jetify = true sources = [ - "android/junit/src/org/chromium/base/AnimationFrameTimeHistogramTest.java", "android/junit/src/org/chromium/base/ApplicationStatusTest.java", + "android/junit/src/org/chromium/base/CallbackControllerTest.java", "android/junit/src/org/chromium/base/DiscardableReferencePoolTest.java", "android/junit/src/org/chromium/base/FileUtilsTest.java", "android/junit/src/org/chromium/base/LifetimeAssertTest.java", @@ -3612,6 +3679,7 @@ if (is_android) { "android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java", "android/junit/src/org/chromium/base/process_launcher/ChildProcessConnectionTest.java", "android/junit/src/org/chromium/base/supplier/ObservableSupplierImplTest.java", + "android/junit/src/org/chromium/base/supplier/OneShotCallbackTest.java", "android/junit/src/org/chromium/base/task/AsyncTaskThreadTest.java", "android/junit/src/org/chromium/base/task/TaskTraitsTest.java", "android/junit/src/org/chromium/base/util/GarbageCollectionTestUtilsUnitTest.java", @@ -3652,6 +3720,24 @@ if (is_android) { ] } + generate_jni("base_profiler_test_support_jni_headers") { + testonly = true + sources = + [ "android/javatests/src/org/chromium/base/profiler/TestSupport.java" ] + } + + android_library("base_profiler_test_support_java") { + testonly = true + sources = + [ "android/javatests/src/org/chromium/base/profiler/TestSupport.java" ] + + annotation_processor_deps = [ "//base/android/jni_generator:jni_processor" ] + deps = [ + "//base:base_java", + "//base:jni_java", + ] + } + generate_build_config_srcjar("base_build_config_gen") { use_final_fields = false } diff --git a/chromium/base/DEPS b/chromium/base/DEPS index 1c8911d0430..477a867eafb 100644 --- a/chromium/base/DEPS +++ b/chromium/base/DEPS @@ -6,6 +6,7 @@ include_rules = [ "+third_party/lss", "+third_party/modp_b64", "+third_party/tcmalloc", + "+third_party/libunwindstack/src/libunwindstack/include", # These are implicitly brought in from the root, and we don't want them. "-ipc", diff --git a/chromium/base/allocator/allocator.gni b/chromium/base/allocator/allocator.gni index 62e03b364d8..148e37d9106 100644 --- a/chromium/base/allocator/allocator.gni +++ b/chromium/base/allocator/allocator.gni @@ -17,7 +17,7 @@ if (is_android || is_mac || is_ios || is_asan || is_lsan || is_tsan || # The debug CRT on Windows has some debug features that are incompatible with # the shim. NaCl in particular does seem to link some binaries statically # against the debug CRT with "is_nacl=false". 
-if ((is_linux || is_android || is_mac || +if ((is_linux || is_android || is_mac || is_ios || (is_win && !is_component_build && !is_debug)) && !is_asan && !is_hwasan && !is_lsan && !is_tsan && !is_msan) { _default_use_allocator_shim = true @@ -37,9 +37,11 @@ assert(use_allocator == "none" || use_allocator == "tcmalloc") assert(!is_win || use_allocator == "none", "Tcmalloc doesn't work on Windows.") assert(!is_mac || use_allocator == "none", "Tcmalloc doesn't work on macOS.") +assert(!is_ios || use_allocator == "none", "Tcmalloc doesn't work on iOS.") -assert(!use_allocator_shim || is_linux || is_android || is_win || is_mac, - "use_allocator_shim works only on Android, Linux, macOS, and Windows.") +assert( + !use_allocator_shim || is_linux || is_android || is_win || is_mac || is_ios, + "use_allocator_shim works only on Android, iOS, Linux, macOS, and Windows.") if (is_win && use_allocator_shim) { assert(!is_component_build, diff --git a/chromium/base/allocator/allocator_extension.cc b/chromium/base/allocator/allocator_extension.cc index 87dd5731c06..72ae582a0f3 100644 --- a/chromium/base/allocator/allocator_extension.cc +++ b/chromium/base/allocator/allocator_extension.cc @@ -4,7 +4,7 @@ #include "base/allocator/allocator_extension.h" #include "base/allocator/buildflags.h" -#include "base/logging.h" +#include "base/check.h" #if BUILDFLAG(USE_TCMALLOC) #include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h" diff --git a/chromium/base/allocator/allocator_interception_mac.mm b/chromium/base/allocator/allocator_interception_mac.mm index 9db4dc5d48a..9272fec330b 100644 --- a/chromium/base/allocator/allocator_interception_mac.mm +++ b/chromium/base/allocator/allocator_interception_mac.mm @@ -21,7 +21,6 @@ #import <Foundation/Foundation.h> #include <errno.h> #include <mach/mach.h> -#include <mach/mach_vm.h> #import <objc/runtime.h> #include <stddef.h> @@ -32,13 +31,18 @@ #include "base/bind.h" #include "base/bits.h" #include "base/logging.h" -#include "base/mac/mac_util.h" #include "base/mac/mach_logging.h" #include "base/process/memory.h" #include "base/threading/sequenced_task_runner_handle.h" #include "build/build_config.h" #include "third_party/apple_apsl/CFBase.h" +#if defined(OS_IOS) +#include "base/ios/ios_util.h" +#else +#include "base/mac/mac_util.h" +#endif + namespace base { namespace allocator { @@ -59,18 +63,18 @@ bool g_oom_killer_enabled; // re-protected when modifications are complete. This approach assumes that // there is no contention for the protection of this memory. 
void DeprotectMallocZone(ChromeMallocZone* default_zone, - mach_vm_address_t* reprotection_start, - mach_vm_size_t* reprotection_length, + vm_address_t* reprotection_start, + vm_size_t* reprotection_length, vm_prot_t* reprotection_value) { mach_port_t unused; - *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone); + *reprotection_start = reinterpret_cast<vm_address_t>(default_zone); struct vm_region_basic_info_64 info; mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64; - kern_return_t result = mach_vm_region( - mach_task_self(), reprotection_start, reprotection_length, - VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&info), - &count, &unused); - MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region"; + kern_return_t result = + vm_region_64(mach_task_self(), reprotection_start, reprotection_length, + VM_REGION_BASIC_INFO_64, + reinterpret_cast<vm_region_info_t>(&info), &count, &unused); + MACH_CHECK(result == KERN_SUCCESS, result) << "vm_region_64"; // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but // balance it with a deallocate in case this ever changes. See 10.9.2 @@ -80,11 +84,9 @@ void DeprotectMallocZone(ChromeMallocZone* default_zone, // Does the region fully enclose the zone pointers? Possibly unwarranted // simplification used: using the size of a full version 8 malloc zone rather // than the actual smaller size if the passed-in zone is not version 8. - CHECK(*reprotection_start <= - reinterpret_cast<mach_vm_address_t>(default_zone)); - mach_vm_size_t zone_offset = - reinterpret_cast<mach_vm_size_t>(default_zone) - - reinterpret_cast<mach_vm_size_t>(*reprotection_start); + CHECK(*reprotection_start <= reinterpret_cast<vm_address_t>(default_zone)); + vm_size_t zone_offset = reinterpret_cast<vm_address_t>(default_zone) - + reinterpret_cast<vm_address_t>(*reprotection_start); CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length); if (info.protection & VM_PROT_WRITE) { @@ -94,10 +96,10 @@ void DeprotectMallocZone(ChromeMallocZone* default_zone, *reprotection_value = VM_PROT_NONE; } else { *reprotection_value = info.protection; - result = mach_vm_protect(mach_task_self(), *reprotection_start, - *reprotection_length, false, - info.protection | VM_PROT_WRITE); - MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; + result = + vm_protect(mach_task_self(), *reprotection_start, *reprotection_length, + false, info.protection | VM_PROT_WRITE); + MACH_CHECK(result == KERN_SUCCESS, result) << "vm_protect"; } } @@ -211,7 +213,11 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone, // === Core Foundation CFAllocators === bool CanGetContextForCFAllocator() { +#if defined(OS_IOS) + return !base::ios::IsRunningOnOrLater(14, 0, 0); +#else return !base::mac::IsOSLaterThan10_15_DontCallThis(); +#endif } CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) { @@ -529,8 +535,8 @@ void ShimNewMallocZones() { void ReplaceZoneFunctions(ChromeMallocZone* zone, const MallocZoneFunctions* functions) { // Remove protection. - mach_vm_address_t reprotection_start = 0; - mach_vm_size_t reprotection_length = 0; + vm_address_t reprotection_start = 0; + vm_size_t reprotection_length = 0; vm_prot_t reprotection_value = VM_PROT_NONE; DeprotectMallocZone(zone, &reprotection_start, &reprotection_length, &reprotection_value); @@ -558,9 +564,9 @@ void ReplaceZoneFunctions(ChromeMallocZone* zone, // Restore protection if it was active. 
if (reprotection_start) { kern_return_t result = - mach_vm_protect(mach_task_self(), reprotection_start, - reprotection_length, false, reprotection_value); - MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect"; + vm_protect(mach_task_self(), reprotection_start, reprotection_length, + false, reprotection_value); + MACH_CHECK(result == KERN_SUCCESS, result) << "vm_protect"; } } diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc index ef42d5ad67f..320bca7e168 100644 --- a/chromium/base/allocator/allocator_shim.cc +++ b/chromium/base/allocator/allocator_shim.cc @@ -12,7 +12,7 @@ #include "base/allocator/buildflags.h" #include "base/atomicops.h" #include "base/bits.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/process/process_metrics.h" #include "base/threading/platform_thread.h" diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_winheap.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_winheap.cc index f36c6f0c8ea..7af1f4d66c2 100644 --- a/chromium/base/allocator/allocator_shim_default_dispatch_to_winheap.cc +++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_winheap.cc @@ -4,8 +4,10 @@ #include "base/allocator/allocator_shim.h" +#include <ostream> + #include "base/allocator/winheap_stubs_win.h" -#include "base/logging.h" +#include "base/check.h" namespace { diff --git a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h index c6cc306ee24..2e7b256dabc 100644 --- a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h +++ b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h @@ -5,9 +5,7 @@ // This header defines symbols to override the same functions in the Visual C++ // CRT implementation. -#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_ -#error This header is meant to be included only once by allocator_shim.cc -#endif +#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_ #define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_ #include <malloc.h> @@ -147,28 +145,6 @@ __declspec(restrict) void* _aligned_offset_recalloc(void* address, __builtin_unreachable(); } -// The symbols -// * __acrt_heap -// * __acrt_initialize_heap -// * __acrt_uninitialize_heap -// * _get_heap_handle -// must be overridden all or none, as they are otherwise supplied -// by heap_handle.obj in the ucrt.lib file. -HANDLE __acrt_heap = nullptr; - -bool __acrt_initialize_heap() { - __acrt_heap = ::HeapCreate(0, 0, 0); - return true; -} - -bool __acrt_uninitialize_heap() { - ::HeapDestroy(__acrt_heap); - __acrt_heap = nullptr; - return true; -} - -intptr_t _get_heap_handle(void) { - return reinterpret_cast<intptr_t>(__acrt_heap); -} - } // extern "C" + +#endif // BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_UCRT_SYMBOLS_WIN_H_ diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc index be42d81ffbc..5a8bdfc482d 100644 --- a/chromium/base/allocator/allocator_shim_unittest.cc +++ b/chromium/base/allocator/allocator_shim_unittest.cc @@ -61,7 +61,14 @@ constexpr size_t kTestSizeEstimate = 1234; class AllocatorShimTest : public testing::Test { public: +#if defined(OS_IOS) + // TODO(crbug.com/1077271): 64-bit iOS uses a page size that is larger than + // kSystemPageSize, causing this test to make larger allocations, relative to + // kSystemPageSize. 
+ static const size_t kMaxSizeTracked = 6 * base::kSystemPageSize; +#else static const size_t kMaxSizeTracked = 2 * base::kSystemPageSize; +#endif AllocatorShimTest() : testing::Test() {} static size_t Hash(const void* ptr) { @@ -538,7 +545,7 @@ TEST_F(AllocatorShimTest, NewHandlerConcurrency) { #if defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM) TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) { - ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle())); + ASSERT_EQ(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle())); } #endif // defined(OS_WIN) && BUILDFLAG(USE_ALLOCATOR_SHIM) diff --git a/chromium/base/allocator/partition_allocator/OWNERS b/chromium/base/allocator/partition_allocator/OWNERS index b0a2a850f7b..816b0f2a7ab 100644 --- a/chromium/base/allocator/partition_allocator/OWNERS +++ b/chromium/base/allocator/partition_allocator/OWNERS @@ -1,5 +1,6 @@ ajwong@chromium.org haraken@chromium.org +lizeb@chromium.org palmer@chromium.org tsepez@chromium.org diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc index 625718bfc44..72078fdaa50 100644 --- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc +++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc @@ -7,7 +7,7 @@ #include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/random.h" #include "base/allocator/partition_allocator/spin_lock.h" -#include "base/logging.h" +#include "base/check_op.h" #include "build/build_config.h" #if defined(OS_WIN) diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc index 34e6a5d65ed..081ce4591da 100644 --- a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc +++ b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc @@ -8,7 +8,7 @@ #include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/random.h" -#include "base/logging.h" +#include "base/check_op.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc index f4950ad13bb..0d515b11463 100644 --- a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc +++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc @@ -13,6 +13,26 @@ namespace base { +namespace { + +template <bool thread_safe> +void Insert(std::set<internal::PartitionRootBase<thread_safe>*>* partitions, + internal::PartitionRootBase<thread_safe>* partition) { + DCHECK(partition); + auto it_and_whether_inserted = partitions->insert(partition); + DCHECK(it_and_whether_inserted.second); +} + +template <bool thread_safe> +void Remove(std::set<internal::PartitionRootBase<thread_safe>*>* partitions, + internal::PartitionRootBase<thread_safe>* partition) { + DCHECK(partition); + size_t erased_count = partitions->erase(partition); + DCHECK_EQ(1u, erased_count); +} + +} // namespace + constexpr TimeDelta PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta; // static @@ -22,19 +42,27 @@ PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() { } void PartitionAllocMemoryReclaimer::RegisterPartition( - 
internal::PartitionRootBase* partition) { + internal::PartitionRootBase<internal::ThreadSafe>* partition) { AutoLock lock(lock_); - DCHECK(partition); - auto it_and_whether_inserted = partitions_.insert(partition); - DCHECK(it_and_whether_inserted.second); + Insert(&thread_safe_partitions_, partition); +} + +void PartitionAllocMemoryReclaimer::RegisterPartition( + internal::PartitionRootBase<internal::NotThreadSafe>* partition) { + AutoLock lock(lock_); + Insert(&thread_unsafe_partitions_, partition); } void PartitionAllocMemoryReclaimer::UnregisterPartition( - internal::PartitionRootBase* partition) { + internal::PartitionRootBase<internal::ThreadSafe>* partition) { AutoLock lock(lock_); - DCHECK(partition); - size_t erased_count = partitions_.erase(partition); - DCHECK_EQ(1u, erased_count); + Remove(&thread_safe_partitions_, partition); +} + +void PartitionAllocMemoryReclaimer::UnregisterPartition( + internal::PartitionRootBase<internal::NotThreadSafe>* partition) { + AutoLock lock(lock_); + Remove(&thread_unsafe_partitions_, partition); } void PartitionAllocMemoryReclaimer::Start( @@ -44,7 +72,7 @@ void PartitionAllocMemoryReclaimer::Start( { AutoLock lock(lock_); - DCHECK(!partitions_.empty()); + DCHECK(!thread_safe_partitions_.empty()); } // This does not need to run on the main thread, however there are a few @@ -94,7 +122,9 @@ void PartitionAllocMemoryReclaimer::Reclaim() { { AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls. - for (auto* partition : partitions_) + for (auto* partition : thread_safe_partitions_) + partition->PurgeMemory(kFlags); + for (auto* partition : thread_unsafe_partitions_) partition->PurgeMemory(kFlags); } @@ -121,7 +151,8 @@ void PartitionAllocMemoryReclaimer::ResetForTesting() { has_called_reclaim_ = false; total_reclaim_thread_time_ = TimeDelta(); timer_ = nullptr; - partitions_.clear(); + thread_safe_partitions_.clear(); + thread_unsafe_partitions_.clear(); } } // namespace base diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.h b/chromium/base/allocator/partition_allocator/memory_reclaimer.h index 8d520db1156..4e51332dca6 100644 --- a/chromium/base/allocator/partition_allocator/memory_reclaimer.h +++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.h @@ -8,6 +8,7 @@ #include <memory> #include <set> +#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/bind.h" #include "base/callback.h" #include "base/location.h" @@ -20,12 +21,6 @@ namespace base { -namespace internal { - -struct PartitionRootBase; - -} // namespace internal - // Posts and handles memory reclaim tasks for PartitionAlloc. // // Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be @@ -41,10 +36,16 @@ class BASE_EXPORT PartitionAllocMemoryReclaimer { // Internal. Do not use. // Registers a partition to be tracked by the reclaimer. - void RegisterPartition(internal::PartitionRootBase* partition); + void RegisterPartition( + internal::PartitionRootBase<internal::ThreadSafe>* partition); + void RegisterPartition( + internal::PartitionRootBase<internal::NotThreadSafe>* partition); // Internal. Do not use. // Unregisters a partition to be tracked by the reclaimer. - void UnregisterPartition(internal::PartitionRootBase* partition); + void UnregisterPartition( + internal::PartitionRootBase<internal::ThreadSafe>* partition); + void UnregisterPartition( + internal::PartitionRootBase<internal::NotThreadSafe>* partition); // Starts the periodic reclaim. Should be called once. 
void Start(scoped_refptr<SequencedTaskRunner> task_runner); // Triggers an explicit reclaim now. @@ -67,7 +68,10 @@ class BASE_EXPORT PartitionAllocMemoryReclaimer { std::unique_ptr<RepeatingTimer> timer_; Lock lock_; - std::set<internal::PartitionRootBase*> partitions_ GUARDED_BY(lock_); + std::set<internal::PartitionRootBase<internal::ThreadSafe>*> + thread_safe_partitions_ GUARDED_BY(lock_); + std::set<internal::PartitionRootBase<internal::NotThreadSafe>*> + thread_unsafe_partitions_ GUARDED_BY(lock_); friend class NoDestructor<PartitionAllocMemoryReclaimer>; friend class PartitionAllocMemoryReclaimerTest; diff --git a/chromium/base/allocator/partition_allocator/oom_callback.cc b/chromium/base/allocator/partition_allocator/oom_callback.cc index 2e22e109016..c734458acbb 100644 --- a/chromium/base/allocator/partition_allocator/oom_callback.cc +++ b/chromium/base/allocator/partition_allocator/oom_callback.cc @@ -4,7 +4,7 @@ #include "base/allocator/partition_allocator/oom_callback.h" -#include "base/logging.h" +#include "base/check.h" namespace base { diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc index 28d90fe9b8f..b7785505efc 100644 --- a/chromium/base/allocator/partition_allocator/page_allocator.cc +++ b/chromium/base/allocator/partition_allocator/page_allocator.cc @@ -12,7 +12,7 @@ #include "base/allocator/partition_allocator/page_allocator_internal.h" #include "base/allocator/partition_allocator/spin_lock.h" #include "base/bits.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/no_destructor.h" #include "base/numerics/checked_math.h" #include "build/build_config.h" diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc index 11602606817..a186840ffe3 100644 --- a/chromium/base/allocator/partition_allocator/partition_alloc.cc +++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc @@ -13,12 +13,27 @@ #include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/spin_lock.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/no_destructor.h" #include "base/synchronization/lock.h" namespace base { +namespace { + +template <bool thread_safe> +bool InitializeOnce() { + // We mark the sentinel bucket/page as free to make sure it is skipped by + // our logic to find a new active page. + internal::PartitionBucket<thread_safe>::get_sentinel_bucket() + ->active_pages_head = + internal::PartitionPage<thread_safe>::get_sentinel_page(); + + return true; +} + +} // namespace + // Two partition pages are used as guard / metadata page so make sure the super // page size is bigger. 
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size"); @@ -29,13 +44,16 @@ static_assert(kSystemPageSize * 4 <= kPartitionPageSize, "ok partition page size"); static_assert(!(kPartitionPageSize % kSystemPageSize), "ok partition page multiple"); -static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize, +static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <= + kPageMetadataSize, "PartitionPage should not be too big"); -static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize, - "PartitionBucket should not be too big"); -static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <= +static_assert(sizeof(internal::PartitionBucket<internal::ThreadSafe>) <= kPageMetadataSize, - "PartitionSuperPageExtentEntry should not be too big"); + "PartitionBucket should not be too big"); +static_assert( + sizeof(internal::PartitionSuperPageExtentEntry<internal::ThreadSafe>) <= + kPageMetadataSize, + "PartitionSuperPageExtentEntry should not be too big"); static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <= kSystemPageSize, "page metadata fits in hole"); @@ -49,26 +67,17 @@ static_assert(kGenericMaxBucketed == 983040, "generic max bucketed"); static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8), "System pages per slot span must be less than 128."); -internal::PartitionRootBase::PartitionRootBase() = default; -internal::PartitionRootBase::~PartitionRootBase() = default; PartitionRoot::PartitionRoot() = default; PartitionRoot::~PartitionRoot() = default; PartitionRootGeneric::PartitionRootGeneric() = default; PartitionRootGeneric::~PartitionRootGeneric() = default; PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default; -Lock& GetLock() { - static NoDestructor<Lock> s_initialized_lock; - return *s_initialized_lock; -} -static bool g_initialized = false; - Lock& GetHooksLock() { static NoDestructor<Lock> lock; return *lock; } -OomFunction internal::PartitionRootBase::g_oom_handling_function = nullptr; std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false); std::atomic<PartitionAllocHooks::AllocationObserverHook*> PartitionAllocHooks::allocation_observer_hook_(nullptr); @@ -173,28 +182,22 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out, return false; } -static void PartitionAllocBaseInit(internal::PartitionRootBase* root) { +template <bool thread_safe> +static void PartitionAllocBaseInit( + internal::PartitionRootBase<thread_safe>* root) { DCHECK(!root->initialized); - { - AutoLock guard(GetLock()); - if (!g_initialized) { - g_initialized = true; - // We mark the sentinel bucket/page as free to make sure it is skipped by - // our logic to find a new active page. - internal::PartitionBucket::get_sentinel_bucket()->active_pages_head = - internal::PartitionPage::get_sentinel_page(); - } - } - root->initialized = true; + static bool intialized = InitializeOnce<thread_safe>(); + static_cast<void>(intialized); // This is a "magic" value so we can test if a root pointer is valid. 
root->inverted_self = ~reinterpret_cast<uintptr_t>(root); + root->initialized = true; } void PartitionAllocGlobalInit(OomFunction on_out_of_memory) { DCHECK(on_out_of_memory); - internal::PartitionRootBase::g_oom_handling_function = on_out_of_memory; + internal::g_oom_handling_function = on_out_of_memory; } void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) { @@ -203,13 +206,13 @@ void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) { num_buckets = bucket_count; max_allocation = maximum_allocation; for (size_t i = 0; i < num_buckets; ++i) { - internal::PartitionBucket& bucket = buckets()[i]; + Bucket& bucket = buckets()[i]; bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift)); } } void PartitionRootGeneric::Init() { - subtle::SpinLock::Guard guard(lock); + ScopedGuard guard{lock_}; PartitionAllocBaseInit(this); @@ -250,11 +253,11 @@ void PartitionRootGeneric::Init() { size_t current_size = kGenericSmallestBucket; size_t current_increment = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; - internal::PartitionBucket* bucket = &buckets[0]; + Bucket* bucket = &buckets[0]; for (i = 0; i < kGenericNumBucketedOrders; ++i) { for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { bucket->Init(current_size); - // Disable psuedo buckets so that touching them faults. + // Disable pseudo buckets so that touching them faults. if (current_size % kGenericSmallestBucket) bucket->active_pages_head = nullptr; current_size += current_increment; @@ -267,16 +270,16 @@ void PartitionRootGeneric::Init() { // Then set up the fast size -> bucket lookup table. bucket = &buckets[0]; - internal::PartitionBucket** bucket_ptr = &bucket_lookups[0]; + Bucket** bucket_ptr = &bucket_lookups[0]; for (order = 0; order <= kBitsPerSizeT; ++order) { for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { if (order < kGenericMinBucketedOrder) { // Use the bucket of the finest granularity for malloc(0) etc. *bucket_ptr++ = &buckets[0]; } else if (order > kGenericMaxBucketedOrder) { - *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket(); + *bucket_ptr++ = Bucket::get_sentinel_bucket(); } else { - internal::PartitionBucket* valid_bucket = bucket; + Bucket* valid_bucket = bucket; // Skip over invalid buckets. while (valid_bucket->slot_size % kGenericSmallestBucket) valid_bucket++; @@ -289,31 +292,34 @@ void PartitionRootGeneric::Init() { DCHECK(bucket_ptr == &bucket_lookups[0] + ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder)); // And there's one last bucket lookup that will be hit for e.g. malloc(-1), - // which tries to overflow to a non-existant order. - *bucket_ptr = internal::PartitionBucket::get_sentinel_bucket(); + // which tries to overflow to a non-existent order. + *bucket_ptr = Bucket::get_sentinel_bucket(); } -bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root, - internal::PartitionPage* page, - size_t raw_size) { +bool PartitionReallocDirectMappedInPlace( + PartitionRootGeneric* root, + internal::PartitionPage<internal::ThreadSafe>* page, + size_t raw_size) EXCLUSIVE_LOCKS_REQUIRED(root->lock_) { DCHECK(page->bucket->is_direct_mapped()); raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size); // Note that the new size might be a bucketed size; this function is called // whenever we're reallocating a direct mapped allocation. 
- size_t new_size = internal::PartitionBucket::get_direct_map_size(raw_size); + size_t new_size = PartitionRootGeneric::Bucket::get_direct_map_size(raw_size); if (new_size < kGenericMinDirectMappedDownsize) return false; // bucket->slot_size is the current size of the allocation. size_t current_size = page->bucket->slot_size; - char* char_ptr = static_cast<char*>(internal::PartitionPage::ToPointer(page)); + char* char_ptr = + static_cast<char*>(PartitionRootGeneric::Page::ToPointer(page)); if (new_size == current_size) { // No need to move any memory around, but update size and cookie below. } else if (new_size < current_size) { size_t map_size = - internal::PartitionDirectMapExtent::FromPage(page)->map_size; + internal::PartitionDirectMapExtent<internal::ThreadSafe>::FromPage(page) + ->map_size; // Don't reallocate in-place if new size is less than 80 % of the full // map size, to avoid holding on to too much unused address space. @@ -325,7 +331,9 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root, root->DecommitSystemPages(char_ptr + new_size, decommit_size); SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible); } else if (new_size <= - internal::PartitionDirectMapExtent::FromPage(page)->map_size) { + internal::PartitionDirectMapExtent<internal::ThreadSafe>::FromPage( + page) + ->map_size) { // Grow within the actually allocated memory. Just need to make the // pages accessible again. size_t recommit_size = new_size - current_size; @@ -386,22 +394,27 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root, &actual_old_size, ptr); } if (LIKELY(!overridden)) { - internal::PartitionPage* page = internal::PartitionPage::FromPointer( + PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer( internal::PartitionCookieFreePointerAdjust(ptr)); - // TODO(palmer): See if we can afford to make this a CHECK. - DCHECK(root->IsValidPage(page)); - - if (UNLIKELY(page->bucket->is_direct_mapped())) { - // We may be able to perform the realloc in place by changing the - // accessibility of memory pages and, if reducing the size, decommitting - // them. - if (PartitionReallocDirectMappedInPlace(root, page, new_size)) { - if (UNLIKELY(hooks_enabled)) { - PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size, - type_name); - } - return ptr; + bool success = false; + { + PartitionRootGeneric::ScopedGuard guard{root->lock_}; + // TODO(palmer): See if we can afford to make this a CHECK. + DCHECK(root->IsValidPage(page)); + + if (UNLIKELY(page->bucket->is_direct_mapped())) { + // We may be able to perform the realloc in place by changing the + // accessibility of memory pages and, if reducing the size, decommitting + // them. 
+ success = PartitionReallocDirectMappedInPlace(root, page, new_size); + } + } + if (success) { + if (UNLIKELY(hooks_enabled)) { + PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size, + type_name); } + return ptr; } const size_t actual_new_size = root->ActualSize(new_size); @@ -441,7 +454,7 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root, root->Free(ptr); return ret; #endif -} +} // namespace base void* PartitionRootGeneric::Realloc(void* ptr, size_t new_size, @@ -456,8 +469,10 @@ void* PartitionRootGeneric::TryRealloc(void* ptr, new_size, type_name); } -static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { - const internal::PartitionBucket* bucket = page->bucket; +template <bool thread_safe> +static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page, + bool discard) { + const internal::PartitionBucket<thread_safe>* bucket = page->bucket; size_t slot_size = bucket->slot_size; if (slot_size < kSystemPageSize || !page->num_allocated_slots) return 0; @@ -470,8 +485,8 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size)); discardable_bytes = bucket->slot_size - used_bytes; if (discardable_bytes && discard) { - char* ptr = - reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page)); + char* ptr = reinterpret_cast<char*>( + internal::PartitionPage<thread_safe>::ToPointer(page)); ptr += used_bytes; DiscardSystemPages(ptr, discardable_bytes); } @@ -490,7 +505,8 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { size_t last_slot = static_cast<size_t>(-1); #endif memset(slot_usage, 1, num_slots); - char* ptr = reinterpret_cast<char*>(internal::PartitionPage::ToPointer(page)); + char* ptr = reinterpret_cast<char*>( + internal::PartitionPage<thread_safe>::ToPointer(page)); // First, walk the freelist for this page and make a bitmap of which slots // are not in use. 
for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry; @@ -603,18 +619,21 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) { return discardable_bytes; } -static void PartitionPurgeBucket(internal::PartitionBucket* bucket) { +template <bool thread_safe> +static void PartitionPurgeBucket( + internal::PartitionBucket<thread_safe>* bucket) { if (bucket->active_pages_head != - internal::PartitionPage::get_sentinel_page()) { - for (internal::PartitionPage* page = bucket->active_pages_head; page; - page = page->next_page) { - DCHECK(page != internal::PartitionPage::get_sentinel_page()); + internal::PartitionPage<thread_safe>::get_sentinel_page()) { + for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head; + page; page = page->next_page) { + DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page()); PartitionPurgePage(page, true); } } } void PartitionRoot::PurgeMemory(int flags) { + ScopedGuard guard{lock_}; if (flags & PartitionPurgeDecommitEmptyPages) DecommitEmptyPages(); // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages @@ -624,20 +643,21 @@ void PartitionRoot::PurgeMemory(int flags) { } void PartitionRootGeneric::PurgeMemory(int flags) { - subtle::SpinLock::Guard guard(lock); + ScopedGuard guard{lock_}; if (flags & PartitionPurgeDecommitEmptyPages) DecommitEmptyPages(); if (flags & PartitionPurgeDiscardUnusedSystemPages) { for (size_t i = 0; i < kGenericNumBuckets; ++i) { - internal::PartitionBucket* bucket = &buckets[i]; + Bucket* bucket = &buckets[i]; if (bucket->slot_size >= kSystemPageSize) PartitionPurgeBucket(bucket); } } } +template <bool thread_safe> static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out, - internal::PartitionPage* page) { + internal::PartitionPage<thread_safe>* page) { uint16_t bucket_num_slots = page->bucket->get_slots_per_span(); if (page->is_decommitted()) { @@ -670,15 +690,17 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out, } } -static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out, - const internal::PartitionBucket* bucket) { +template <bool thread_safe> +static void PartitionDumpBucketStats( + PartitionBucketMemoryStats* stats_out, + const internal::PartitionBucket<thread_safe>* bucket) { DCHECK(!bucket->is_direct_mapped()); stats_out->is_valid = false; // If the active page list is empty (== // internal::PartitionPage::get_sentinel_page()), the bucket might still need // to be reported if it has a list of empty, decommitted or full pages. 
if (bucket->active_pages_head == - internal::PartitionPage::get_sentinel_page() && + internal::PartitionPage<thread_safe>::get_sentinel_page() && !bucket->empty_pages_head && !bucket->decommitted_pages_head && !bucket->num_full_pages) return; @@ -695,22 +717,23 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out, stats_out->resident_bytes = bucket->num_full_pages * stats_out->allocated_page_size; - for (internal::PartitionPage* page = bucket->empty_pages_head; page; - page = page->next_page) { + for (internal::PartitionPage<thread_safe>* page = bucket->empty_pages_head; + page; page = page->next_page) { DCHECK(page->is_empty() || page->is_decommitted()); PartitionDumpPageStats(stats_out, page); } - for (internal::PartitionPage* page = bucket->decommitted_pages_head; page; - page = page->next_page) { + for (internal::PartitionPage<thread_safe>* page = + bucket->decommitted_pages_head; + page; page = page->next_page) { DCHECK(page->is_decommitted()); PartitionDumpPageStats(stats_out, page); } if (bucket->active_pages_head != - internal::PartitionPage::get_sentinel_page()) { - for (internal::PartitionPage* page = bucket->active_pages_head; page; - page = page->next_page) { - DCHECK(page != internal::PartitionPage::get_sentinel_page()); + internal::PartitionPage<thread_safe>::get_sentinel_page()) { + for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head; + page; page = page->next_page) { + DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page()); PartitionDumpPageStats(stats_out, page); } } @@ -719,6 +742,7 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out, void PartitionRootGeneric::DumpStats(const char* partition_name, bool is_light_dump, PartitionStatsDumper* dumper) { + ScopedGuard guard{lock_}; PartitionMemoryStats stats = {0}; stats.total_mmapped_bytes = total_size_of_super_pages + total_size_of_direct_mapped_pages; @@ -739,10 +763,8 @@ void PartitionRootGeneric::DumpStats(const char* partition_name, PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets]; size_t num_direct_mapped_allocations = 0; { - subtle::SpinLock::Guard guard(lock); - for (size_t i = 0; i < kGenericNumBuckets; ++i) { - const internal::PartitionBucket* bucket = &buckets[i]; + const Bucket* bucket = &buckets[i]; // Don't report the pseudo buckets that the generic allocator sets up in // order to preserve a fast size->bucket map (see // PartitionRootGeneric::Init() for details). 
@@ -758,7 +780,8 @@ void PartitionRootGeneric::DumpStats(const char* partition_name, } } - for (internal::PartitionDirectMapExtent* extent = direct_map_list; + for (internal::PartitionDirectMapExtent<internal::ThreadSafe>* extent = + direct_map_list; extent && num_direct_mapped_allocations < kMaxReportableDirectMaps; extent = extent->next_extent, ++num_direct_mapped_allocations) { DCHECK(!extent->next_extent || @@ -803,6 +826,8 @@ void PartitionRootGeneric::DumpStats(const char* partition_name, void PartitionRoot::DumpStats(const char* partition_name, bool is_light_dump, PartitionStatsDumper* dumper) { + ScopedGuard guard{lock_}; + PartitionMemoryStats stats = {0}; stats.total_mmapped_bytes = total_size_of_super_pages; stats.total_committed_bytes = total_size_of_committed_pages; @@ -811,8 +836,8 @@ void PartitionRoot::DumpStats(const char* partition_name, static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*); std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats; if (!is_light_dump) { - memory_stats = std::unique_ptr<PartitionBucketMemoryStats[]>( - new PartitionBucketMemoryStats[kMaxReportableBuckets]); + memory_stats = + std::make_unique<PartitionBucketMemoryStats[]>(kMaxReportableBuckets); } const size_t partition_num_buckets = num_buckets; diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h index 06ecc577675..ef635e78291 100644 --- a/chromium/base/allocator/partition_allocator/partition_alloc.h +++ b/chromium/base/allocator/partition_allocator/partition_alloc.h @@ -108,18 +108,19 @@ enum PartitionPurgeFlags { }; // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. -struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase { +struct BASE_EXPORT PartitionRoot + : public internal::PartitionRootBase<internal::NotThreadSafe> { PartitionRoot(); ~PartitionRoot() override; // This references the buckets OFF the edge of this struct. All uses of // PartitionRoot must have the bucket array come right after. // // The PartitionAlloc templated class ensures the following is correct. - ALWAYS_INLINE internal::PartitionBucket* buckets() { - return reinterpret_cast<internal::PartitionBucket*>(this + 1); + ALWAYS_INLINE Bucket* buckets() { + return reinterpret_cast<Bucket*>(this + 1); } - ALWAYS_INLINE const internal::PartitionBucket* buckets() const { - return reinterpret_cast<const internal::PartitionBucket*>(this + 1); + ALWAYS_INLINE const Bucket* buckets() const { + return reinterpret_cast<const Bucket*>(this + 1); } void Init(size_t bucket_count, size_t maximum_allocation); @@ -136,10 +137,10 @@ struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase { // Never instantiate a PartitionRootGeneric directly, instead use // PartitionAllocatorGeneric. -struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase { +struct BASE_EXPORT PartitionRootGeneric + : public internal::PartitionRootBase<internal::ThreadSafe> { PartitionRootGeneric(); ~PartitionRootGeneric() override; - subtle::SpinLock lock; // Some pre-computed constants. size_t order_index_shifts[kBitsPerSizeT + 1] = {}; size_t order_sub_index_masks[kBitsPerSizeT + 1] = {}; @@ -148,18 +149,15 @@ struct BASE_EXPORT PartitionRootGeneric : public internal::PartitionRootBase { // sizes. It is one flat array instead of a 2D array because in the 2D // world, we'd need to index array[blah][max+1] which risks undefined // behavior. 
- internal::PartitionBucket* - bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] = - {}; - internal::PartitionBucket buckets[kGenericNumBuckets] = {}; + Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + + 1] = {}; + Bucket buckets[kGenericNumBuckets] = {}; // Public API. void Init(); ALWAYS_INLINE void* Alloc(size_t size, const char* type_name); ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name); - ALWAYS_INLINE void Free(void* ptr); - NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name); // Overload that may return nullptr if reallocation isn't possible. In this // case, |ptr| remains valid. @@ -222,74 +220,6 @@ class BASE_EXPORT PartitionStatsDumper { BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory); -// PartitionAlloc supports setting hooks to observe allocations/frees as they -// occur as well as 'override' hooks that allow overriding those operations. -class BASE_EXPORT PartitionAllocHooks { - public: - // Log allocation and free events. - typedef void AllocationObserverHook(void* address, - size_t size, - const char* type_name); - typedef void FreeObserverHook(void* address); - - // If it returns true, the allocation has been overridden with the pointer in - // *out. - typedef bool AllocationOverrideHook(void** out, - int flags, - size_t size, - const char* type_name); - // If it returns true, then the allocation was overridden and has been freed. - typedef bool FreeOverrideHook(void* address); - // If it returns true, the underlying allocation is overridden and *out holds - // the size of the underlying allocation. - typedef bool ReallocOverrideHook(size_t* out, void* address); - - // To unhook, call Set*Hooks with nullptrs. - static void SetObserverHooks(AllocationObserverHook* alloc_hook, - FreeObserverHook* free_hook); - static void SetOverrideHooks(AllocationOverrideHook* alloc_hook, - FreeOverrideHook* free_hook, - ReallocOverrideHook realloc_hook); - - // Helper method to check whether hooks are enabled. This is an optimization - // so that if a function needs to call observer and override hooks in two - // different places this value can be cached and only loaded once. - static bool AreHooksEnabled() { - return hooks_enabled_.load(std::memory_order_relaxed); - } - - static void AllocationObserverHookIfEnabled(void* address, - size_t size, - const char* type_name); - static bool AllocationOverrideHookIfEnabled(void** out, - int flags, - size_t size, - const char* type_name); - - static void FreeObserverHookIfEnabled(void* address); - static bool FreeOverrideHookIfEnabled(void* address); - - static void ReallocObserverHookIfEnabled(void* old_address, - void* new_address, - size_t size, - const char* type_name); - static bool ReallocOverrideHookIfEnabled(size_t* out, void* address); - - private: - // Single bool that is used to indicate whether observer or allocation hooks - // are set to reduce the numbers of loads required to check whether hooking is - // enabled. - static std::atomic<bool> hooks_enabled_; - - // Lock used to synchronize Set*Hooks calls. 
- static std::atomic<AllocationObserverHook*> allocation_observer_hook_; - static std::atomic<FreeObserverHook*> free_observer_hook_; - - static std::atomic<AllocationOverrideHook*> allocation_override_hook_; - static std::atomic<FreeOverrideHook*> free_override_hook_; - static std::atomic<ReallocOverrideHook*> realloc_override_hook_; -}; - ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) { return AllocFlags(0, size, type_name); } @@ -320,8 +250,11 @@ ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags, size_t index = size >> kBucketShift; DCHECK(index < num_buckets); DCHECK(size == index << kBucketShift); - internal::PartitionBucket* bucket = &buckets()[index]; - result = AllocFromBucket(bucket, flags, size); + { + ScopedGuard guard{lock_}; + Bucket* bucket = &buckets()[index]; + result = AllocFromBucket(bucket, flags, size); + } if (UNLIKELY(hooks_enabled)) { PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size, type_name); @@ -343,44 +276,23 @@ ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) { // cause trouble, and the caller is responsible for that not happening. DCHECK(PartitionAllocSupportsGetSize()); ptr = internal::PartitionCookieFreePointerAdjust(ptr); - internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); + internal::PartitionPage<internal::ThreadSafe>* page = + internal::PartitionPage<internal::ThreadSafe>::FromPointer(ptr); // TODO(palmer): See if we can afford to make this a CHECK. - DCHECK(internal::PartitionRootBase::IsValidPage(page)); + DCHECK(internal::PartitionRootBase<internal::ThreadSafe>::IsValidPage(page)); size_t size = page->bucket->slot_size; return internal::PartitionCookieSizeAdjustSubtract(size); } -ALWAYS_INLINE void PartitionFree(void* ptr) { -#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) - free(ptr); -#else - // TODO(palmer): Check ptr alignment before continuing. Shall we do the check - // inside PartitionCookieFreePointerAdjust? - if (PartitionAllocHooks::AreHooksEnabled()) { - PartitionAllocHooks::FreeObserverHookIfEnabled(ptr); - if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr)) - return; - } - - ptr = internal::PartitionCookieFreePointerAdjust(ptr); - internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); - // TODO(palmer): See if we can afford to make this a CHECK. - DCHECK(internal::PartitionRootBase::IsValidPage(page)); - internal::DeferredUnmap deferred_unmap = page->Free(ptr); - deferred_unmap.Run(); -#endif -} - -ALWAYS_INLINE internal::PartitionBucket* PartitionGenericSizeToBucket( - PartitionRootGeneric* root, - size_t size) { +ALWAYS_INLINE internal::PartitionBucket<internal::ThreadSafe>* +PartitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size) { size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size); // The order index is simply the next few bits after the most significant bit. size_t order_index = (size >> root->order_index_shifts[order]) & (kGenericNumBucketsPerOrder - 1); // And if the remaining bits are non-zero we must bump the bucket up. 
size_t sub_order_index = size & root->order_sub_index_masks[order]; - internal::PartitionBucket* bucket = + internal::PartitionBucket<internal::ThreadSafe>* bucket = root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) + order_index + !!sub_order_index]; CHECK(bucket); @@ -417,9 +329,11 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, } size_t requested_size = size; size = internal::PartitionCookieSizeAdjustAdd(size); - internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); + internal::PartitionBucket<internal::ThreadSafe>* bucket = + PartitionGenericSizeToBucket(root, size); + DCHECK(bucket); { - subtle::SpinLock::Guard guard(root->lock); + PartitionRootGeneric::ScopedGuard guard{root->lock_}; result = root->AllocFromBucket(bucket, flags, size); } if (UNLIKELY(hooks_enabled)) { @@ -442,34 +356,6 @@ ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags, return PartitionAllocGenericFlags(this, flags, size, type_name); } -ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) { -#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) - free(ptr); -#else - DCHECK(initialized); - - if (UNLIKELY(!ptr)) - return; - - if (PartitionAllocHooks::AreHooksEnabled()) { - PartitionAllocHooks::FreeObserverHookIfEnabled(ptr); - if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr)) - return; - } - - ptr = internal::PartitionCookieFreePointerAdjust(ptr); - internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); - // TODO(palmer): See if we can afford to make this a CHECK. - DCHECK(IsValidPage(page)); - internal::DeferredUnmap deferred_unmap; - { - subtle::SpinLock::Guard guard(lock); - deferred_unmap = page->Free(ptr); - } - deferred_unmap.Run(); -#endif -} - BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root, int flags, void* ptr, @@ -482,13 +368,13 @@ ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) { #else DCHECK(initialized); size = internal::PartitionCookieSizeAdjustAdd(size); - internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size); + Bucket* bucket = PartitionGenericSizeToBucket(this, size); if (LIKELY(!bucket->is_direct_mapped())) { size = bucket->slot_size; } else if (size > kGenericMaxDirectMapped) { // Too large to allocate => return the size unchanged. } else { - size = internal::PartitionBucket::get_direct_map_size(size); + size = Bucket::get_direct_map_size(size); } return internal::PartitionCookieSizeAdjustSubtract(size); #endif @@ -499,7 +385,7 @@ class SizeSpecificPartitionAllocator { public: SizeSpecificPartitionAllocator() { memset(actual_buckets_, 0, - sizeof(internal::PartitionBucket) * base::size(actual_buckets_)); + sizeof(PartitionRoot::Bucket) * base::size(actual_buckets_)); } ~SizeSpecificPartitionAllocator() { PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition( @@ -516,7 +402,7 @@ class SizeSpecificPartitionAllocator { private: PartitionRoot partition_root_; - internal::PartitionBucket actual_buckets_[kNumBuckets]; + PartitionRoot::Bucket actual_buckets_[kNumBuckets]; }; class BASE_EXPORT PartitionAllocatorGeneric { diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_forward.h b/chromium/base/allocator/partition_allocator/partition_alloc_forward.h new file mode 100644 index 00000000000..c2019e511bf --- /dev/null +++ b/chromium/base/allocator/partition_allocator/partition_alloc_forward.h @@ -0,0 +1,22 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_ +#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_ + +namespace base { +namespace internal { + +template <bool thread_safe> +struct PartitionPage; + +constexpr bool ThreadSafe = true; +constexpr bool NotThreadSafe = false; +template <bool thread_safe> +struct PartitionRootBase; + +} // namespace internal +} // namespace base + +#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_ diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc index c2094cbe34c..e6fae94cfcc 100644 --- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc +++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc @@ -12,7 +12,6 @@ #include <vector> #include "base/allocator/partition_allocator/address_space_randomization.h" -#include "base/allocator/partition_allocator/partition_alloc.h" #include "base/logging.h" #include "base/rand_util.h" #include "base/stl_util.h" @@ -28,12 +27,6 @@ #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) -// Because there is so much deep inspection of the internal objects, -// explicitly annotating the namespaces for commonly expected objects makes the -// code unreadable. Prefer using directives instead. -using base::internal::PartitionBucket; -using base::internal::PartitionPage; - namespace { constexpr size_t kTestMaxAllocation = base::kSystemPageSize; @@ -93,7 +86,7 @@ const size_t kTestSizes[] = { 100, base::kSystemPageSize, base::kSystemPageSize + 1, - base::internal::PartitionBucket::get_direct_map_size(100), + base::PartitionRootGeneric::Bucket::get_direct_map_size(100), 1 << 20, 1 << 21, }; @@ -110,9 +103,8 @@ void AllocateRandomly(base::PartitionRootGeneric* root, } for (size_t i = 0; i < count; ++i) { - if (allocations[i]) { - base::PartitionFree(allocations[i]); - } + if (allocations[i]) + root->Free(allocations[i]); } } @@ -152,10 +144,10 @@ class PartitionAllocTest : public testing::Test { generic_allocator.init(); } - PartitionPage* GetFullPage(size_t size) { + PartitionRoot::Page* GetFullPage(size_t size) { size_t real_size = size + kExtraAllocSize; size_t bucket_index = real_size >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + PartitionRoot::Bucket* bucket = &allocator.root()->buckets()[bucket_index]; size_t num_slots = (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size; void* first = nullptr; @@ -169,8 +161,8 @@ class PartitionAllocTest : public testing::Test { else if (i == num_slots - 1) last = PartitionCookieFreePointerAdjust(ptr); } - EXPECT_EQ(PartitionPage::FromPointer(first), - PartitionPage::FromPointer(last)); + EXPECT_EQ(PartitionRoot::Page::FromPointer(first), + PartitionRoot::Page::FromPointer(last)); if (bucket->num_system_pages_per_slot_span == kNumSystemPagesPerPartitionPage) EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, @@ -180,20 +172,20 @@ class PartitionAllocTest : public testing::Test { EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head); EXPECT_TRUE(bucket->active_pages_head); EXPECT_TRUE(bucket->active_pages_head != - PartitionPage::get_sentinel_page()); + PartitionRoot::Page::get_sentinel_page()); return bucket->active_pages_head; } void CycleFreeCache(size_t size) { size_t real_size = size + 
kExtraAllocSize; size_t bucket_index = real_size >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + PartitionRoot::Bucket* bucket = &allocator.root()->buckets()[bucket_index]; DCHECK(!bucket->active_pages_head->num_allocated_slots); for (size_t i = 0; i < kMaxFreeableSpans; ++i) { void* ptr = allocator.root()->Alloc(size, type_name); EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); - PartitionFree(ptr); + allocator.root()->Free(ptr); EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); } @@ -202,9 +194,10 @@ class PartitionAllocTest : public testing::Test { void CycleGenericFreeCache(size_t size) { for (size_t i = 0; i < kMaxFreeableSpans; ++i) { void* ptr = generic_allocator.root()->Alloc(size, type_name); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); - PartitionBucket* bucket = page->bucket; + PartitionRootGeneric::Page* page = + PartitionRootGeneric::Page::FromPointer( + PartitionCookieFreePointerAdjust(ptr)); + PartitionRootGeneric::Bucket* bucket = page->bucket; EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); generic_allocator.root()->Free(ptr); EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); @@ -301,15 +294,15 @@ class PartitionAllocDeathTest : public PartitionAllocTest {}; namespace { -void FreeFullPage(PartitionPage* page) { +void FreeFullPage(PartitionRoot* root, PartitionRoot::Page* page) { size_t size = page->bucket->slot_size; size_t num_slots = (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size; EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots))); - char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page)); + char* ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page)); size_t i; for (i = 0; i < num_slots; ++i) { - PartitionFree(ptr + kPointerOffset); + root->Free(ptr + kPointerOffset); ptr += size; } } @@ -382,8 +375,9 @@ class MockPartitionStatsDumper : public PartitionStatsDumper { // Check that the most basic of allocate / free pairs work. TEST_F(PartitionAllocTest, Basic) { - PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; - PartitionPage* seed_page = PartitionPage::get_sentinel_page(); + PartitionRoot::Bucket* bucket = + &allocator.root()->buckets()[kTestBucketIndex]; + PartitionRoot::Page* seed_page = PartitionRoot::Page::get_sentinel_page(); EXPECT_FALSE(bucket->empty_pages_head); EXPECT_FALSE(bucket->decommitted_pages_head); @@ -398,7 +392,7 @@ TEST_F(PartitionAllocTest, Basic) { EXPECT_EQ(kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask); - PartitionFree(ptr); + allocator.root()->Free(ptr); // Expect that the last active page gets noticed as empty but doesn't get // decommitted. EXPECT_TRUE(bucket->empty_pages_head); @@ -417,13 +411,13 @@ TEST_F(PartitionAllocTest, MultiAlloc) { EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); // Check that we re-use the just-freed slot. 
- PartitionFree(ptr2); + allocator.root()->Free(ptr2); ptr2 = reinterpret_cast<char*>( allocator.root()->Alloc(kTestAllocSize, type_name)); EXPECT_TRUE(ptr2); diff = ptr2 - ptr1; EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); - PartitionFree(ptr1); + allocator.root()->Free(ptr1); ptr1 = reinterpret_cast<char*>( allocator.root()->Alloc(kTestAllocSize, type_name)); EXPECT_TRUE(ptr1); @@ -436,46 +430,49 @@ TEST_F(PartitionAllocTest, MultiAlloc) { diff = ptr3 - ptr1; EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff); - PartitionFree(ptr1); - PartitionFree(ptr2); - PartitionFree(ptr3); + allocator.root()->Free(ptr1); + allocator.root()->Free(ptr2); + allocator.root()->Free(ptr3); } // Test a bucket with multiple pages. TEST_F(PartitionAllocTest, MultiPages) { - PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; + PartitionRoot::Bucket* bucket = + &allocator.root()->buckets()[kTestBucketIndex]; - PartitionPage* page = GetFullPage(kTestAllocSize); - FreeFullPage(page); + PartitionRoot::Page* page = GetFullPage(kTestAllocSize); + FreeFullPage(allocator.root(), page); EXPECT_TRUE(bucket->empty_pages_head); - EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head); + EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(), + bucket->active_pages_head); EXPECT_EQ(nullptr, page->next_page); EXPECT_EQ(0, page->num_allocated_slots); page = GetFullPage(kTestAllocSize); - PartitionPage* page2 = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize); EXPECT_EQ(page2, bucket->active_pages_head); EXPECT_EQ(nullptr, page2->next_page); - EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page)) & + EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionRoot::Page::ToPointer(page)) & kSuperPageBaseMask, - reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page2)) & + reinterpret_cast<uintptr_t>(PartitionRoot::Page::ToPointer(page2)) & kSuperPageBaseMask); // Fully free the non-current page. This will leave us with no current // active page because one is empty and the other is full. - FreeFullPage(page); + FreeFullPage(allocator.root(), page); EXPECT_EQ(0, page->num_allocated_slots); EXPECT_TRUE(bucket->empty_pages_head); - EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head); + EXPECT_EQ(PartitionPage<base::internal::NotThreadSafe>::get_sentinel_page(), + bucket->active_pages_head); // Allocate a new page, it should pull from the freelist. page = GetFullPage(kTestAllocSize); EXPECT_FALSE(bucket->empty_pages_head); EXPECT_EQ(page, bucket->active_pages_head); - FreeFullPage(page); - FreeFullPage(page2); + FreeFullPage(allocator.root(), page); + FreeFullPage(allocator.root(), page2); EXPECT_EQ(0, page->num_allocated_slots); EXPECT_EQ(0, page2->num_allocated_slots); EXPECT_EQ(0, page2->num_unprovisioned_slots); @@ -484,19 +481,20 @@ TEST_F(PartitionAllocTest, MultiPages) { // Test some finer aspects of internal page transitions. 
TEST_F(PartitionAllocTest, PageTransitions) { - PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; + PartitionRoot::Bucket* bucket = + &allocator.root()->buckets()[kTestBucketIndex]; - PartitionPage* page1 = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize); EXPECT_EQ(page1, bucket->active_pages_head); EXPECT_EQ(nullptr, page1->next_page); - PartitionPage* page2 = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize); EXPECT_EQ(page2, bucket->active_pages_head); EXPECT_EQ(nullptr, page2->next_page); // Bounce page1 back into the non-full list then fill it up again. - char* ptr = - reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset; - PartitionFree(ptr); + char* ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) + + kPointerOffset; + allocator.root()->Free(ptr); EXPECT_EQ(page1, bucket->active_pages_head); (void)allocator.root()->Alloc(kTestAllocSize, type_name); EXPECT_EQ(page1, bucket->active_pages_head); @@ -505,14 +503,14 @@ TEST_F(PartitionAllocTest, PageTransitions) { // Allocating another page at this point should cause us to scan over page1 // (which is both full and NOT our current page), and evict it from the // freelist. Older code had a O(n^2) condition due to failure to do this. - PartitionPage* page3 = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page3 = GetFullPage(kTestAllocSize); EXPECT_EQ(page3, bucket->active_pages_head); EXPECT_EQ(nullptr, page3->next_page); // Work out a pointer into page2 and free it. - ptr = - reinterpret_cast<char*>(PartitionPage::ToPointer(page2)) + kPointerOffset; - PartitionFree(ptr); + ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page2)) + + kPointerOffset; + allocator.root()->Free(ptr); // Trying to allocate at this time should cause us to cycle around to page2 // and find the recently freed slot. char* new_ptr = reinterpret_cast<char*>( @@ -523,9 +521,9 @@ TEST_F(PartitionAllocTest, PageTransitions) { // Work out a pointer into page1 and free it. This should pull the page // back into the list of available pages. - ptr = - reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset; - PartitionFree(ptr); + ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) + + kPointerOffset; + allocator.root()->Free(ptr); // This allocation should be satisfied by page1. new_ptr = reinterpret_cast<char*>( allocator.root()->Alloc(kTestAllocSize, type_name)); @@ -533,27 +531,29 @@ TEST_F(PartitionAllocTest, PageTransitions) { EXPECT_EQ(page1, bucket->active_pages_head); EXPECT_EQ(page2, page1->next_page); - FreeFullPage(page3); - FreeFullPage(page2); - FreeFullPage(page1); + FreeFullPage(allocator.root(), page3); + FreeFullPage(allocator.root(), page2); + FreeFullPage(allocator.root(), page1); // Allocating whilst in this state exposed a bug, so keep the test. ptr = reinterpret_cast<char*>( allocator.root()->Alloc(kTestAllocSize, type_name)); - PartitionFree(ptr); + allocator.root()->Free(ptr); } // Test some corner cases relating to page transitions in the internal // free page list metadata bucket. 
TEST_F(PartitionAllocTest, FreePageListPageTransitions) { - PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; + PartitionRoot::Bucket* bucket = + &allocator.root()->buckets()[kTestBucketIndex]; size_t num_to_fill_free_list_page = - kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize); + kPartitionPageSize / (sizeof(PartitionRoot::Page) + kExtraAllocSize); // The +1 is because we need to account for the fact that the current page // never gets thrown on the freelist. ++num_to_fill_free_list_page; - auto pages = std::make_unique<PartitionPage* []>(num_to_fill_free_list_page); + auto pages = + std::make_unique<PartitionRoot::Page*[]>(num_to_fill_free_list_page); size_t i; for (i = 0; i < num_to_fill_free_list_page; ++i) { @@ -561,17 +561,18 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) { } EXPECT_EQ(pages[num_to_fill_free_list_page - 1], bucket->active_pages_head); for (i = 0; i < num_to_fill_free_list_page; ++i) - FreeFullPage(pages[i]); - EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head); + FreeFullPage(allocator.root(), pages[i]); + EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(), + bucket->active_pages_head); EXPECT_TRUE(bucket->empty_pages_head); // Allocate / free in a different bucket size so we get control of a // different free page list. We need two pages because one will be the last // active page and not get freed. - PartitionPage* page1 = GetFullPage(kTestAllocSize * 2); - PartitionPage* page2 = GetFullPage(kTestAllocSize * 2); - FreeFullPage(page1); - FreeFullPage(page2); + PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize * 2); + PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize * 2); + FreeFullPage(allocator.root(), page1); + FreeFullPage(allocator.root(), page2); for (i = 0; i < num_to_fill_free_list_page; ++i) { pages[i] = GetFullPage(kTestAllocSize); @@ -579,8 +580,9 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) { EXPECT_EQ(pages[num_to_fill_free_list_page - 1], bucket->active_pages_head); for (i = 0; i < num_to_fill_free_list_page; ++i) - FreeFullPage(pages[i]); - EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head); + FreeFullPage(allocator.root(), pages[i]); + EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(), + bucket->active_pages_head); EXPECT_TRUE(bucket->empty_pages_head); } @@ -595,12 +597,12 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) { --num_pages_needed; EXPECT_GT(num_pages_needed, 1u); - auto pages = std::make_unique<PartitionPage* []>(num_pages_needed); + auto pages = std::make_unique<PartitionRoot::Page*[]>(num_pages_needed); uintptr_t first_super_page_base = 0; size_t i; for (i = 0; i < num_pages_needed; ++i) { pages[i] = GetFullPage(kTestAllocSize); - void* storage_ptr = PartitionPage::ToPointer(pages[i]); + void* storage_ptr = PartitionRoot::Page::ToPointer(pages[i]); if (!i) first_super_page_base = reinterpret_cast<uintptr_t>(storage_ptr) & kSuperPageBaseMask; @@ -615,7 +617,7 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) { } } for (i = 0; i < num_pages_needed; ++i) - FreeFullPage(pages[i]); + FreeFullPage(allocator.root(), pages[i]); } // Test the generic allocation functions that can handle arbitrary sizes and @@ -723,8 +725,8 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) { EXPECT_TRUE(ptr2); generic_allocator.root()->Free(ptr); // Should be freeable at this point. 
- PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer( + PartitionCookieFreePointerAdjust(ptr)); EXPECT_NE(-1, page->empty_cache_index); generic_allocator.root()->Free(ptr2); @@ -742,9 +744,10 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) { void* ptr4 = generic_allocator.root()->Alloc(size, type_name); EXPECT_TRUE(ptr4); - page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); - PartitionPage* page2 = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr3)); + page = PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr)); + PartitionRootGeneric::Page* page2 = PartitionRootGeneric::Page::FromPointer( + PartitionCookieFreePointerAdjust(ptr3)); EXPECT_NE(page, page2); generic_allocator.root()->Free(ptr); @@ -867,8 +870,8 @@ TEST_F(PartitionAllocTest, Realloc) { void* ptr = generic_allocator.root()->Realloc(nullptr, kTestAllocSize, type_name); memset(ptr, 'A', kTestAllocSize); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer( + PartitionCookieFreePointerAdjust(ptr)); // realloc(ptr, 0) should be equivalent to free(). void* ptr2 = generic_allocator.root()->Realloc(ptr, 0, type_name); EXPECT_EQ(nullptr, ptr2); @@ -932,14 +935,15 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { EXPECT_EQ(kSystemPageSize - kAllocationGranularity, big_size + kExtraAllocSize); size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + PartitionBucket<base::internal::NotThreadSafe>* bucket = + &allocator.root()->buckets()[bucket_index]; EXPECT_EQ(nullptr, bucket->empty_pages_head); void* ptr = allocator.root()->Alloc(big_size, type_name); EXPECT_TRUE(ptr); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + PartitionRoot::Page* page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); size_t total_slots = (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / (big_size + kExtraAllocSize); @@ -972,21 +976,21 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { void* ptr5 = allocator.root()->Alloc(big_size, type_name); EXPECT_TRUE(ptr5); - PartitionPage* page2 = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr5)); + PartitionRoot::Page* page2 = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr5)); EXPECT_EQ(1, page2->num_allocated_slots); // Churn things a little whilst there's a partial page freelist. 
- PartitionFree(ptr); + allocator.root()->Free(ptr); ptr = allocator.root()->Alloc(big_size, type_name); void* ptr6 = allocator.root()->Alloc(big_size, type_name); - PartitionFree(ptr); - PartitionFree(ptr2); - PartitionFree(ptr3); - PartitionFree(ptr4); - PartitionFree(ptr5); - PartitionFree(ptr6); + allocator.root()->Free(ptr); + allocator.root()->Free(ptr2); + allocator.root()->Free(ptr3); + allocator.root()->Free(ptr4); + allocator.root()->Free(ptr5); + allocator.root()->Free(ptr6); EXPECT_NE(-1, page->empty_cache_index); EXPECT_NE(-1, page2->empty_cache_index); EXPECT_TRUE(page2->freelist_head); @@ -1001,7 +1005,8 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { ptr = allocator.root()->Alloc(mediumSize, type_name); EXPECT_TRUE(ptr); - page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(1, page->num_allocated_slots); total_slots = (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / @@ -1010,7 +1015,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { EXPECT_EQ(2u, first_page_slots); EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots); - PartitionFree(ptr); + allocator.root()->Free(ptr); size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize; bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift; @@ -1019,7 +1024,8 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { ptr = allocator.root()->Alloc(smallSize, type_name); EXPECT_TRUE(ptr); - page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(1, page->num_allocated_slots); total_slots = (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / @@ -1027,7 +1033,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { first_page_slots = kSystemPageSize / (smallSize + kExtraAllocSize); EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots); - PartitionFree(ptr); + allocator.root()->Free(ptr); EXPECT_TRUE(page->freelist_head); EXPECT_EQ(0, page->num_allocated_slots); @@ -1038,7 +1044,8 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { ptr = allocator.root()->Alloc(verySmallSize, type_name); EXPECT_TRUE(ptr); - page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(1, page->num_allocated_slots); total_slots = (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / @@ -1046,7 +1053,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { first_page_slots = kSystemPageSize / (verySmallSize + kExtraAllocSize); EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots); - PartitionFree(ptr); + allocator.root()->Free(ptr); EXPECT_TRUE(page->freelist_head); EXPECT_EQ(0, page->num_allocated_slots); @@ -1056,7 +1063,8 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize; ptr = generic_allocator.root()->Alloc(page_and_a_half_size, type_name); EXPECT_TRUE(ptr); - page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(1, page->num_allocated_slots); EXPECT_TRUE(page->freelist_head); total_slots = @@ -1069,7 +1077,8 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { size_t pageSize = kSystemPageSize - kExtraAllocSize; ptr = 
generic_allocator.root()->Alloc(pageSize, type_name); EXPECT_TRUE(ptr); - page = PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(1, page->num_allocated_slots); EXPECT_FALSE(page->freelist_head); total_slots = @@ -1081,26 +1090,27 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { // Test some of the fragmentation-resistant properties of the allocator. TEST_F(PartitionAllocTest, PageRefilling) { - PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; + PartitionRoot::Bucket* bucket = + &allocator.root()->buckets()[kTestBucketIndex]; // Grab two full pages and a non-full page. - PartitionPage* page1 = GetFullPage(kTestAllocSize); - PartitionPage* page2 = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize); void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name); EXPECT_TRUE(ptr); EXPECT_NE(page1, bucket->active_pages_head); EXPECT_NE(page2, bucket->active_pages_head); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + PartitionRoot::Page* page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(1, page->num_allocated_slots); // Work out a pointer into page2 and free it; and then page1 and free it. - char* ptr2 = - reinterpret_cast<char*>(PartitionPage::ToPointer(page1)) + kPointerOffset; - PartitionFree(ptr2); - ptr2 = - reinterpret_cast<char*>(PartitionPage::ToPointer(page2)) + kPointerOffset; - PartitionFree(ptr2); + char* ptr2 = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) + + kPointerOffset; + allocator.root()->Free(ptr2); + ptr2 = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page2)) + + kPointerOffset; + allocator.root()->Free(ptr2); // If we perform two allocations from the same bucket now, we expect to // refill both the nearly full pages. @@ -1108,16 +1118,16 @@ TEST_F(PartitionAllocTest, PageRefilling) { (void)allocator.root()->Alloc(kTestAllocSize, type_name); EXPECT_EQ(1, page->num_allocated_slots); - FreeFullPage(page2); - FreeFullPage(page1); - PartitionFree(ptr); + FreeFullPage(allocator.root(), page2); + FreeFullPage(allocator.root(), page1); + allocator.root()->Free(ptr); } // Basic tests to ensure that allocations work for partial page buckets. TEST_F(PartitionAllocTest, PartialPages) { // Find a size that is backed by a partial partition page. size_t size = sizeof(void*); - PartitionBucket* bucket = nullptr; + PartitionRoot::Bucket* bucket = nullptr; while (size < kTestMaxAllocation) { bucket = &allocator.root()->buckets()[size >> kBucketShift]; if (bucket->num_system_pages_per_slot_span % @@ -1127,10 +1137,10 @@ TEST_F(PartitionAllocTest, PartialPages) { } EXPECT_LT(size, kTestMaxAllocation); - PartitionPage* page1 = GetFullPage(size); - PartitionPage* page2 = GetFullPage(size); - FreeFullPage(page2); - FreeFullPage(page1); + PartitionRoot::Page* page1 = GetFullPage(size); + PartitionRoot::Page* page2 = GetFullPage(size); + FreeFullPage(allocator.root(), page2); + FreeFullPage(allocator.root(), page1); } // Test correct handling if our mapping collides with another. @@ -1139,16 +1149,16 @@ TEST_F(PartitionAllocTest, MappingCollision) { // guard pages. 
size_t num_partition_pages_needed = kNumPartitionPagesPerSuperPage - 2; auto first_super_page_pages = - std::make_unique<PartitionPage* []>(num_partition_pages_needed); + std::make_unique<PartitionRoot::Page*[]>(num_partition_pages_needed); auto second_super_page_pages = - std::make_unique<PartitionPage* []>(num_partition_pages_needed); + std::make_unique<PartitionRoot::Page*[]>(num_partition_pages_needed); size_t i; for (i = 0; i < num_partition_pages_needed; ++i) first_super_page_pages[i] = GetFullPage(kTestAllocSize); char* page_base = reinterpret_cast<char*>( - PartitionPage::ToPointer(first_super_page_pages[0])); + PartitionRoot::Page::ToPointer(first_super_page_pages[0])); EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask); page_base -= kPartitionPageSize; @@ -1170,7 +1180,7 @@ TEST_F(PartitionAllocTest, MappingCollision) { FreePages(map2, kPageAllocationGranularity); page_base = reinterpret_cast<char*>( - PartitionPage::ToPointer(second_super_page_pages[0])); + PartitionRoot::Page::ToPointer(second_super_page_pages[0])); EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask); page_base -= kPartitionPageSize; @@ -1189,32 +1199,32 @@ TEST_F(PartitionAllocTest, MappingCollision) { EXPECT_TRUE(TrySetSystemPagesAccess(map2, kPageAllocationGranularity, PageInaccessible)); - PartitionPage* page_in_third_super_page = GetFullPage(kTestAllocSize); + PartitionRoot::Page* page_in_third_super_page = GetFullPage(kTestAllocSize); FreePages(map1, kPageAllocationGranularity); FreePages(map2, kPageAllocationGranularity); EXPECT_EQ(0u, reinterpret_cast<uintptr_t>( - PartitionPage::ToPointer(page_in_third_super_page)) & + PartitionRoot::Page::ToPointer(page_in_third_super_page)) & kPartitionPageOffsetMask); // And make sure we really did get a page in a new superpage. 
EXPECT_NE(reinterpret_cast<uintptr_t>( - PartitionPage::ToPointer(first_super_page_pages[0])) & + PartitionRoot::Page::ToPointer(first_super_page_pages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>( - PartitionPage::ToPointer(page_in_third_super_page)) & + PartitionRoot::Page::ToPointer(page_in_third_super_page)) & kSuperPageBaseMask); EXPECT_NE(reinterpret_cast<uintptr_t>( - PartitionPage::ToPointer(second_super_page_pages[0])) & + PartitionRoot::Page::ToPointer(second_super_page_pages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>( - PartitionPage::ToPointer(page_in_third_super_page)) & + PartitionRoot::Page::ToPointer(page_in_third_super_page)) & kSuperPageBaseMask); - FreeFullPage(page_in_third_super_page); + FreeFullPage(allocator.root(), page_in_third_super_page); for (i = 0; i < num_partition_pages_needed; ++i) { - FreeFullPage(first_super_page_pages[i]); - FreeFullPage(second_super_page_pages[i]); + FreeFullPage(allocator.root(), first_super_page_pages[i]); + FreeFullPage(allocator.root(), second_super_page_pages[i]); } } @@ -1224,17 +1234,18 @@ TEST_F(PartitionAllocTest, FreeCache) { size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + PartitionBucket<base::internal::NotThreadSafe>* bucket = + &allocator.root()->buckets()[bucket_index]; void* ptr = allocator.root()->Alloc(big_size, type_name); EXPECT_TRUE(ptr); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); + PartitionRoot::Page* page = + PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr)); EXPECT_EQ(nullptr, bucket->empty_pages_head); EXPECT_EQ(1, page->num_allocated_slots); EXPECT_EQ(kPartitionPageSize, allocator.root()->total_size_of_committed_pages); - PartitionFree(ptr); + allocator.root()->Free(ptr); EXPECT_EQ(0, page->num_allocated_slots); EXPECT_NE(-1, page->empty_cache_index); EXPECT_TRUE(page->freelist_head); @@ -1245,7 +1256,7 @@ TEST_F(PartitionAllocTest, FreeCache) { EXPECT_FALSE(page->freelist_head); EXPECT_EQ(-1, page->empty_cache_index); EXPECT_EQ(0, page->num_allocated_slots); - PartitionBucket* cycle_free_cache_bucket = + PartitionBucket<base::internal::NotThreadSafe>* cycle_free_cache_bucket = &allocator.root()->buckets()[kTestBucketIndex]; EXPECT_EQ( cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize, @@ -1255,14 +1266,14 @@ TEST_F(PartitionAllocTest, FreeCache) { // as the active pages head). ptr = allocator.root()->Alloc(big_size, type_name); EXPECT_FALSE(bucket->empty_pages_head); - PartitionFree(ptr); + allocator.root()->Free(ptr); // Also check that a page that is bouncing immediately between empty and // used does not get freed. 
for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) { ptr = allocator.root()->Alloc(big_size, type_name); EXPECT_TRUE(page->freelist_head); - PartitionFree(ptr); + allocator.root()->Free(ptr); EXPECT_TRUE(page->freelist_head); } EXPECT_EQ(kPartitionPageSize, @@ -1278,11 +1289,13 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) { void* ptr2 = generic_allocator.root()->Alloc(size, type_name); EXPECT_TRUE(ptr2); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr)); - PartitionPage* page2 = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr2)); - PartitionBucket* bucket = page->bucket; + PartitionPage<base::internal::ThreadSafe>* page = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr)); + PartitionPage<base::internal::ThreadSafe>* page2 = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr2)); + PartitionBucket<base::internal::ThreadSafe>* bucket = page->bucket; EXPECT_EQ(nullptr, bucket->empty_pages_head); EXPECT_EQ(-1, page->num_allocated_slots); @@ -1305,14 +1318,16 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) { EXPECT_TRUE(bucket->empty_pages_head); EXPECT_TRUE(bucket->empty_pages_head->next_page); - EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head); + EXPECT_EQ(PartitionPage<base::internal::ThreadSafe>::get_sentinel_page(), + bucket->active_pages_head); // At this moment, we have two decommitted pages, on the empty list. ptr = generic_allocator.root()->Alloc(size, type_name); EXPECT_TRUE(ptr); generic_allocator.root()->Free(ptr); - EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head); + EXPECT_EQ(PartitionPage<base::internal::ThreadSafe>::get_sentinel_page(), + bucket->active_pages_head); EXPECT_TRUE(bucket->empty_pages_head); EXPECT_TRUE(bucket->decommitted_pages_head); @@ -1496,7 +1511,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { allocator.root()->DumpStats("mock_allocator", false /* detailed dump */, &mock_stats_dumper); EXPECT_TRUE(mock_stats_dumper.IsMemoryAllocationRecorded()); - PartitionFree(ptr); + allocator.root()->Free(ptr); } // This series of tests checks the active -> empty -> decommitted states. @@ -1808,15 +1823,18 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) { void* ptr5 = generic_allocator.root()->Alloc(size, type_name); void* ptr6 = generic_allocator.root()->Alloc(size, type_name); - PartitionPage* page1 = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1)); - PartitionPage* page2 = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr3)); - PartitionPage* page3 = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr6)); + PartitionPage<base::internal::ThreadSafe>* page1 = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr1)); + PartitionPage<base::internal::ThreadSafe>* page2 = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr3)); + PartitionPage<base::internal::ThreadSafe>* page3 = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr6)); EXPECT_NE(page1, page2); EXPECT_NE(page2, page3); - PartitionBucket* bucket = page1->bucket; + PartitionBucket<base::internal::ThreadSafe>* bucket = page1->bucket; EXPECT_EQ(page3, bucket->active_pages_head); // Free up the 2nd slot in each slot span. 
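(Editorial note, not part of the patch: the test churn in the hunks above and below follows from PartitionPage, PartitionBucket and PartitionRootBase becoming templates over a bool thread_safe parameter, selected through the internal::ThreadSafe / internal::NotThreadSafe constants, with allocation paths now guarded by ScopedGuard guard{lock_}. The following is a minimal, self-contained C++ sketch of that idiom; the names MaybeLock and ToyRoot are hypothetical and exist only for illustration, not Chromium code. It shows how the single-threaded instantiation's lock can compile down to nothing while the thread-safe one guards with a real mutex.)

// Sketch only: bool-template thread-safety idiom, analogous in shape to
// PartitionRootBase<thread_safe>. All names below are hypothetical.
#include <mutex>
#include <vector>

namespace toy {

constexpr bool ThreadSafe = true;
constexpr bool NotThreadSafe = false;

// Primary template: the thread-safe instantiation wraps a real mutex.
template <bool thread_safe>
class MaybeLock {
 public:
  void lock() { mutex_.lock(); }
  void unlock() { mutex_.unlock(); }

 private:
  std::mutex mutex_;
};

// Specialization: the single-threaded variant is an empty shell, so the
// guard below costs nothing when thread_safe == NotThreadSafe.
template <>
class MaybeLock<NotThreadSafe> {
 public:
  void lock() {}
  void unlock() {}
};

template <bool thread_safe>
class ToyRoot {
 public:
  using ScopedGuard = std::lock_guard<MaybeLock<thread_safe>>;

  void Record(int value) {
    ScopedGuard guard{lock_};  // Real mutex or no-op, decided at compile time.
    values_.push_back(value);
  }

 private:
  MaybeLock<thread_safe> lock_;
  std::vector<int> values_;
};

}  // namespace toy

int main() {
  toy::ToyRoot<toy::ThreadSafe> shared_root;      // Guarded by a real mutex.
  toy::ToyRoot<toy::NotThreadSafe> private_root;  // Lock calls compile away.
  shared_root.Record(1);
  private_root.Record(2);
  return 0;
}

(One instantiation per variant is then explicitly emitted, mirroring the "template struct PartitionBucket<ThreadSafe>; template struct PartitionBucket<NotThreadSafe>;" lines added at the end of partition_bucket.cc below.)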
@@ -1853,8 +1871,9 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { char* ptr2 = reinterpret_cast<char*>(generic_allocator.root()->Alloc( kSystemPageSize - kExtraAllocSize, type_name)); generic_allocator.root()->Free(ptr2); - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1)); + PartitionPage<base::internal::ThreadSafe>* page = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr1)); EXPECT_EQ(2u, page->num_unprovisioned_slots); { MockPartitionStatsDumper dumper; @@ -2049,8 +2068,9 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ptr1[kSystemPageSize] = 'A'; ptr1[kSystemPageSize * 2] = 'A'; ptr1[kSystemPageSize * 3] = 'A'; - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1)); + PartitionPage<base::internal::ThreadSafe>* page = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr1)); generic_allocator.root()->Free(ptr2); generic_allocator.root()->Free(ptr4); generic_allocator.root()->Free(ptr1); @@ -2115,8 +2135,9 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ptr1[kSystemPageSize] = 'A'; ptr1[kSystemPageSize * 2] = 'A'; ptr1[kSystemPageSize * 3] = 'A'; - PartitionPage* page = - PartitionPage::FromPointer(PartitionCookieFreePointerAdjust(ptr1)); + PartitionPage<base::internal::ThreadSafe>* page = + PartitionPage<base::internal::ThreadSafe>::FromPointer( + PartitionCookieFreePointerAdjust(ptr1)); generic_allocator.root()->Free(ptr4); generic_allocator.root()->Free(ptr3); EXPECT_EQ(0u, page->num_unprovisioned_slots); @@ -2200,7 +2221,7 @@ TEST_F(PartitionAllocTest, ZeroFill) { } EXPECT_EQ(kAllZerosSentinel, non_zero_position) << "test allocation size: " << size; - PartitionFree(p); + generic_allocator.root()->Free(p); } for (int i = 0; i < 10; ++i) { @@ -2224,7 +2245,7 @@ TEST_F(PartitionAllocTest, Bug_897585) { kDesiredSize, nullptr); ASSERT_NE(nullptr, ptr); memset(ptr, 0xbd, kDesiredSize); - PartitionFree(ptr); + generic_allocator.root()->Free(ptr); } TEST_F(PartitionAllocTest, OverrideHooks) { @@ -2266,7 +2287,7 @@ TEST_F(PartitionAllocTest, OverrideHooks) { kOverriddenSize, kOverriddenType); ASSERT_EQ(ptr, overridden_allocation); - PartitionFree(ptr); + generic_allocator.root()->Free(ptr); EXPECT_TRUE(free_called); // overridden_allocation has not actually been freed so we can now immediately @@ -2278,7 +2299,7 @@ TEST_F(PartitionAllocTest, OverrideHooks) { EXPECT_NE(ptr, overridden_allocation); EXPECT_TRUE(free_called); EXPECT_EQ(*(char*)ptr, kOverriddenChar); - PartitionFree(ptr); + generic_allocator.root()->Free(ptr); PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr); free(overridden_allocation); diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.cc b/chromium/base/allocator/partition_allocator/partition_bucket.cc index 0ff8661878b..a52efccbf6a 100644 --- a/chromium/base/allocator/partition_allocator/partition_bucket.cc +++ b/chromium/base/allocator/partition_allocator/partition_bucket.cc @@ -11,7 +11,7 @@ #include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_root_base.h" -#include "base/logging.h" +#include "base/check.h" #include "build/build_config.h" namespace base { @@ -19,10 +19,12 @@ namespace internal { namespace { -ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, - int flags, - size_t raw_size) { - size_t size = 
PartitionBucket::get_direct_map_size(raw_size); +template <bool thread_safe> +ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap( + PartitionRootBase<thread_safe>* root, + int flags, + size_t raw_size) { + size_t size = PartitionBucket<thread_safe>::get_direct_map_size(raw_size); // Because we need to fake looking like a super page, we need to allocate // a bunch of system pages more than "size": @@ -58,17 +60,17 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, SetSystemPagesAccess(slot + size, kSystemPageSize, PageInaccessible); #endif - PartitionSuperPageExtentEntry* extent = - reinterpret_cast<PartitionSuperPageExtentEntry*>( - PartitionSuperPageToMetadataArea(ptr)); + auto* extent = reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>( + PartitionSuperPageToMetadataArea(ptr)); extent->root = root; // The new structures are all located inside a fresh system page so they // will all be zeroed out. These DCHECKs are for documentation. DCHECK(!extent->super_page_base); DCHECK(!extent->super_pages_end); DCHECK(!extent->next); - PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(slot); - PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( + PartitionPage<thread_safe>* page = + PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(slot); + auto* bucket = reinterpret_cast<PartitionBucket<thread_safe>*>( reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); DCHECK(!page->next_page); DCHECK(!page->num_allocated_slots); @@ -88,8 +90,8 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, DCHECK(!bucket->num_full_pages); bucket->slot_size = size; - PartitionDirectMapExtent* map_extent = - PartitionDirectMapExtent::FromPage(page); + PartitionDirectMapExtent<thread_safe>* map_extent = + PartitionDirectMapExtent<thread_safe>::FromPage(page); map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize; map_extent->bucket = bucket; @@ -106,9 +108,12 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, } // namespace // static -PartitionBucket PartitionBucket::sentinel_bucket_; +template <bool thread_safe> +PartitionBucket<thread_safe> PartitionBucket<thread_safe>::sentinel_bucket_; -PartitionBucket* PartitionBucket::get_sentinel_bucket() { +template <bool thread_safe> +PartitionBucket<thread_safe>* +PartitionBucket<thread_safe>::get_sentinel_bucket() { return &sentinel_bucket_; } @@ -120,7 +125,8 @@ PartitionBucket* PartitionBucket::get_sentinel_bucket() { // TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover // both used and unsed pages. // http://crbug.com/776537 -uint8_t PartitionBucket::get_system_pages_per_slot_span() { +template <bool thread_safe> +uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() { // This works out reasonably for the current bucket sizes of the generic // allocator, and the current values of partition page size and constants. 
// Specifically, we have enough room to always pack the slots perfectly into @@ -175,21 +181,24 @@ uint8_t PartitionBucket::get_system_pages_per_slot_span() { return static_cast<uint8_t>(best_pages); } -void PartitionBucket::Init(uint32_t new_slot_size) { +template <bool thread_safe> +void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) { slot_size = new_slot_size; - active_pages_head = PartitionPage::get_sentinel_page(); + active_pages_head = PartitionPage<thread_safe>::get_sentinel_page(); empty_pages_head = nullptr; decommitted_pages_head = nullptr; num_full_pages = 0; num_system_pages_per_slot_span = get_system_pages_per_slot_span(); } -NOINLINE void PartitionBucket::OnFull() { +template <bool thread_safe> +NOINLINE void PartitionBucket<thread_safe>::OnFull() { OOM_CRASH(0); } -ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan( - PartitionRootBase* root, +template <bool thread_safe> +ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan( + PartitionRootBase<thread_safe>* root, int flags, uint16_t num_partition_pages) { DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) % @@ -270,8 +279,8 @@ ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan( // We allocated a new super page so update super page metadata. // First check if this is a new extent or not. - PartitionSuperPageExtentEntry* latest_extent = - reinterpret_cast<PartitionSuperPageExtentEntry*>( + auto* latest_extent = + reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>( PartitionSuperPageToMetadataArea(super_page)); // By storing the root in every extent metadata object, we have a fast way // to go from a pointer within the partition to the root object. @@ -283,7 +292,8 @@ ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan( latest_extent->super_pages_end = nullptr; latest_extent->next = nullptr; - PartitionSuperPageExtentEntry* current_extent = root->current_extent; + PartitionSuperPageExtentEntry<thread_safe>* current_extent = + root->current_extent; bool is_new_extent = (super_page != requested_address); if (UNLIKELY(is_new_extent)) { if (UNLIKELY(!current_extent)) { @@ -307,14 +317,17 @@ ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan( return ret; } -ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() { +template <bool thread_safe> +ALWAYS_INLINE uint16_t PartitionBucket<thread_safe>::get_pages_per_slot_span() { // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage. return (num_system_pages_per_slot_span + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; } -ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) { +template <bool thread_safe> +ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan( + PartitionPage<thread_safe>* page) { // The bucket never changes. We set it up once. 
page->bucket = this; page->empty_cache_index = -1; @@ -331,14 +344,16 @@ ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) { char* page_char_ptr = reinterpret_cast<char*>(page); for (uint16_t i = 1; i < num_partition_pages; ++i) { page_char_ptr += kPageMetadataSize; - PartitionPage* secondary_page = - reinterpret_cast<PartitionPage*>(page_char_ptr); + auto* secondary_page = + reinterpret_cast<PartitionPage<thread_safe>*>(page_char_ptr); secondary_page->page_offset = i; } } -ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) { - DCHECK(page != PartitionPage::get_sentinel_page()); +template <bool thread_safe> +ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist( + PartitionPage<thread_safe>* page) { + DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page()); uint16_t num_slots = page->num_unprovisioned_slots; DCHECK(num_slots); // We should only get here when _every_ slot is either used or unprovisioned. @@ -350,7 +365,8 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) { DCHECK(page->num_allocated_slots >= 0); size_t size = slot_size; - char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page)); + char* base = + reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page)); char* return_object = base + (size * page->num_allocated_slots); char* first_freelist_pointer = return_object + size; char* first_freelist_pointer_extent = @@ -387,12 +403,11 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) { if (LIKELY(num_new_freelist_entries)) { char* freelist_pointer = first_freelist_pointer; - PartitionFreelistEntry* entry = - reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); + auto* entry = reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); page->freelist_head = entry; while (--num_new_freelist_entries) { freelist_pointer += size; - PartitionFreelistEntry* next_entry = + auto* next_entry = reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); entry->next = PartitionFreelistEntry::Encode(next_entry); entry = next_entry; @@ -404,12 +419,13 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) { return return_object; } -bool PartitionBucket::SetNewActivePage() { - PartitionPage* page = active_pages_head; - if (page == PartitionPage::get_sentinel_page()) +template <bool thread_safe> +bool PartitionBucket<thread_safe>::SetNewActivePage() { + PartitionPage<thread_safe>* page = active_pages_head; + if (page == PartitionPage<thread_safe>::get_sentinel_page()) return false; - PartitionPage* next_page; + PartitionPage<thread_safe>* next_page; for (; page; page = next_page) { next_page = page->next_page; @@ -447,18 +463,20 @@ bool PartitionBucket::SetNewActivePage() { } } - active_pages_head = PartitionPage::get_sentinel_page(); + active_pages_head = PartitionPage<thread_safe>::get_sentinel_page(); return false; } -void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, - int flags, - size_t size, - bool* is_already_zeroed) { +template <bool thread_safe> +void* PartitionBucket<thread_safe>::SlowPathAlloc( + PartitionRootBase<thread_safe>* root, + int flags, + size_t size, + bool* is_already_zeroed) { // The slow path is called when the freelist is empty. 
DCHECK(!active_pages_head->freelist_head); - PartitionPage* new_page = nullptr; + PartitionPage<thread_safe>* new_page = nullptr; *is_already_zeroed = false; // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets @@ -474,7 +492,8 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, if (UNLIKELY(is_direct_mapped())) { DCHECK(size > kGenericMaxBucketed); DCHECK(this == get_sentinel_bucket()); - DCHECK(active_pages_head == PartitionPage::get_sentinel_page()); + DCHECK(active_pages_head == + PartitionPage<thread_safe>::get_sentinel_page()); if (size > kGenericMaxDirectMapped) { if (return_null) return nullptr; @@ -509,7 +528,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, DCHECK(new_page->bucket == this); DCHECK(new_page->is_decommitted()); decommitted_pages_head = new_page->next_page; - void* addr = PartitionPage::ToPointer(new_page); + void* addr = PartitionPage<thread_safe>::ToPointer(new_page); root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span()); new_page->Reset(); // TODO(https://crbug.com/890752): Optimizing here might cause pages to @@ -522,7 +541,8 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, uint16_t num_partition_pages = get_pages_per_slot_span(); void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages); if (LIKELY(raw_pages != nullptr)) { - new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages); + new_page = + PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(raw_pages); InitializeSlotSpan(new_page); // TODO(https://crbug.com/890752): Optimizing here causes pages to not be // zeroed on at least macOS. @@ -532,7 +552,8 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, // Bail if we had a memory allocation failure. if (UNLIKELY(!new_page)) { - DCHECK(active_pages_head == PartitionPage::get_sentinel_page()); + DCHECK(active_pages_head == + PartitionPage<thread_safe>::get_sentinel_page()); if (return_null) return nullptr; root->OutOfMemory(size); @@ -561,5 +582,8 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root, return AllocAndFillFreelist(new_page); } +template struct PartitionBucket<ThreadSafe>; +template struct PartitionBucket<NotThreadSafe>; + } // namespace internal } // namespace base diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.h b/chromium/base/allocator/partition_allocator/partition_bucket.h index 7fa802abc88..608b81b0dd8 100644 --- a/chromium/base/allocator/partition_allocator/partition_bucket.h +++ b/chromium/base/allocator/partition_allocator/partition_bucket.h @@ -9,22 +9,22 @@ #include <stdint.h> #include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/base_export.h" #include "base/compiler_specific.h" #include "base/logging.h" +#include "base/thread_annotations.h" namespace base { namespace internal { -struct PartitionPage; -struct PartitionRootBase; - +template <bool thread_safe> struct PartitionBucket { // Accessed most in hot path => goes first. - PartitionPage* active_pages_head; + PartitionPage<thread_safe>* active_pages_head; - PartitionPage* empty_pages_head; - PartitionPage* decommitted_pages_head; + PartitionPage<thread_safe>* empty_pages_head; + PartitionPage<thread_safe>* decommitted_pages_head; uint32_t slot_size; uint32_t num_system_pages_per_slot_span : 8; uint32_t num_full_pages : 24; @@ -39,10 +39,11 @@ struct PartitionBucket { // them. (See |PartitionRootBase::AllocFromBucket|.) 
// // Note the matching Free() functions are in PartitionPage. - BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root, + BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase<thread_safe>* root, int flags, size_t size, - bool* is_already_zeroed); + bool* is_already_zeroed) + EXCLUSIVE_LOCKS_REQUIRED(root->lock_); ALWAYS_INLINE bool is_direct_mapped() const { return !num_system_pages_per_slot_span; @@ -82,9 +83,6 @@ struct PartitionBucket { bool SetNewActivePage(); private: - static void OutOfMemory(const PartitionRootBase* root); - static void OutOfMemoryWithLotsOfUncommitedPages(); - static NOINLINE void OnFull(); // Returns a natural number of PartitionPages (calculated by @@ -103,9 +101,10 @@ struct PartitionBucket { // Allocates a new slot span with size |num_partition_pages| from the // current extent. Metadata within this slot span will be uninitialized. // Returns nullptr on error. - ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root, + ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase<thread_safe>* root, int flags, - uint16_t num_partition_pages); + uint16_t num_partition_pages) + EXCLUSIVE_LOCKS_REQUIRED(root->lock_); // Each bucket allocates a slot span when it runs out of slots. // A slot span's size is equal to get_pages_per_slot_span() number of @@ -113,12 +112,12 @@ struct PartitionBucket { // span to point to the first PartitionPage which holds all the metadata // for the span and registers this bucket as the owner of the span. It does // NOT put the slots into the bucket's freelist. - ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page); + ALWAYS_INLINE void InitializeSlotSpan(PartitionPage<thread_safe>* page); // Allocates one slot from the given |page| and then adds the remainder to // the current bucket. If the |page| was freshly allocated, it must have been // passed through InitializeSlotSpan() first. - ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page); + ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage<thread_safe>* page); static PartitionBucket sentinel_bucket_; }; diff --git a/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h b/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h index 815560da8e5..c9b6d13b6f0 100644 --- a/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h +++ b/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h @@ -12,19 +12,23 @@ namespace base { namespace internal { +template <bool thread_safe> struct PartitionDirectMapExtent { - PartitionDirectMapExtent* next_extent; - PartitionDirectMapExtent* prev_extent; - PartitionBucket* bucket; + PartitionDirectMapExtent<thread_safe>* next_extent; + PartitionDirectMapExtent<thread_safe>* prev_extent; + PartitionBucket<thread_safe>* bucket; size_t map_size; // Mapped size, not including guard pages and meta-data. 
- ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page); + ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromPage( + PartitionPage<thread_safe>* page); }; -ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage( - PartitionPage* page) { +template <bool thread_safe> +ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>* +PartitionDirectMapExtent<thread_safe>::FromPage( + PartitionPage<thread_safe>* page) { DCHECK(page->bucket->is_direct_mapped()); - return reinterpret_cast<PartitionDirectMapExtent*>( + return reinterpret_cast<PartitionDirectMapExtent<thread_safe>*>( reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); } diff --git a/chromium/base/allocator/partition_allocator/partition_page.cc b/chromium/base/allocator/partition_allocator/partition_page.cc index bfb98950812..b094abc05b6 100644 --- a/chromium/base/allocator/partition_allocator/partition_page.cc +++ b/chromium/base/allocator/partition_allocator/partition_page.cc @@ -6,17 +6,20 @@ #include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_root_base.h" -#include "base/logging.h" +#include "base/check.h" namespace base { namespace internal { namespace { -ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) { - PartitionRootBase* root = PartitionRootBase::FromPage(page); - const PartitionDirectMapExtent* extent = - PartitionDirectMapExtent::FromPage(page); +template <bool thread_safe> +ALWAYS_INLINE DeferredUnmap +PartitionDirectUnmap(PartitionPage<thread_safe>* page) { + PartitionRootBase<thread_safe>* root = + PartitionRootBase<thread_safe>::FromPage(page); + const PartitionDirectMapExtent<thread_safe>* extent = + PartitionDirectMapExtent<thread_safe>::FromPage(page); size_t unmap_size = extent->map_size; // Maintain the doubly-linked list of all direct mappings. @@ -42,16 +45,21 @@ ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) { DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask)); - char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page)); + char* ptr = + reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page)); // Account for the mapping starting a partition page before the actual // allocation address. ptr -= kPartitionPageSize; return {ptr, unmap_size}; } -ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) { +template <bool thread_safe> +ALWAYS_INLINE void PartitionRegisterEmptyPage( + PartitionPage<thread_safe>* page) { DCHECK(page->is_empty()); - PartitionRootBase* root = PartitionRootBase::FromPage(page); + PartitionRootBase<thread_safe>* root = + PartitionRootBase<thread_safe>::FromPage(page); + root->lock_.AssertAcquired(); // If the page is already registered as empty, give it another life. if (page->empty_cache_index != -1) { @@ -62,7 +70,8 @@ ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) { } int16_t current_index = root->global_empty_page_ring_index; - PartitionPage* page_to_decommit = root->global_empty_page_ring[current_index]; + PartitionPage<thread_safe>* page_to_decommit = + root->global_empty_page_ring[current_index]; // The page might well have been re-activated, filled up, etc. before we get // around to looking at it here. 
if (page_to_decommit) @@ -83,13 +92,17 @@ ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) { } // namespace // static -PartitionPage PartitionPage::sentinel_page_; +template <bool thread_safe> +PartitionPage<thread_safe> PartitionPage<thread_safe>::sentinel_page_; -PartitionPage* PartitionPage::get_sentinel_page() { +// static +template <bool thread_safe> +PartitionPage<thread_safe>* PartitionPage<thread_safe>::get_sentinel_page() { return &sentinel_page_; } -DeferredUnmap PartitionPage::FreeSlowPath() { +template <bool thread_safe> +DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() { DCHECK(this != get_sentinel_page()); if (LIKELY(num_allocated_slots == 0)) { // Page became fully unused. @@ -133,7 +146,10 @@ DeferredUnmap PartitionPage::FreeSlowPath() { return {}; } -void PartitionPage::Decommit(PartitionRootBase* root) { +template <bool thread_safe> +void PartitionPage<thread_safe>::Decommit( + PartitionRootBase<thread_safe>* root) { + root->lock_.AssertAcquired(); DCHECK(is_empty()); DCHECK(!bucket->is_direct_mapped()); void* addr = PartitionPage::ToPointer(this); @@ -150,7 +166,10 @@ void PartitionPage::Decommit(PartitionRootBase* root) { DCHECK(is_decommitted()); } -void PartitionPage::DecommitIfPossible(PartitionRootBase* root) { +template <bool thread_safe> +void PartitionPage<thread_safe>::DecommitIfPossible( + PartitionRootBase<thread_safe>* root) { + root->lock_.AssertAcquired(); DCHECK(empty_cache_index >= 0); DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans); DCHECK(this == root->global_empty_page_ring[empty_cache_index]); @@ -163,5 +182,8 @@ void DeferredUnmap::Unmap() { FreePages(ptr, size); } +template struct PartitionPage<ThreadSafe>; +template struct PartitionPage<NotThreadSafe>; + } // namespace internal } // namespace base diff --git a/chromium/base/allocator/partition_allocator/partition_page.h b/chromium/base/allocator/partition_allocator/partition_page.h index a4849b15bc0..cc04e78fb50 100644 --- a/chromium/base/allocator/partition_allocator/partition_page.h +++ b/chromium/base/allocator/partition_allocator/partition_page.h @@ -8,17 +8,17 @@ #include <string.h> #include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_cookie.h" #include "base/allocator/partition_allocator/partition_freelist_entry.h" #include "base/allocator/partition_allocator/random.h" #include "base/logging.h" +#include "base/thread_annotations.h" namespace base { namespace internal { -struct PartitionRootBase; - // PartitionPage::Free() defers unmapping a large page until the lock is // released. Callers of PartitionPage::Free() must invoke Run(). // TODO(1061437): Reconsider once the new locking mechanism is implemented. @@ -63,10 +63,11 @@ struct DeferredUnmap { // similar. If so, all uses of the term "page" in comments, member variables, // local variables, and documentation that refer to this concept should be // updated. 
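The DeferredUnmap type introduced in partition_page.h above exists so that the free path can decide, while the partition lock is still held, which large mapping has to be returned to the OS, and then perform the expensive FreePages() call only after the lock is released (PartitionRootBase::Free() further down does exactly that with a ScopedGuard). A rough sketch of the shape of this pattern, using a plain std::mutex and a stand-in for the page-freeing call rather than the real PartitionAlloc machinery:

#include <cstddef>
#include <mutex>

// Work postponed until after the allocator lock has been released.
struct DeferredUnmapDemo {
  void* ptr = nullptr;
  size_t size = 0;

  void Run() {
    if (ptr)
      ReleaseToOS(ptr, size);
  }

  // Stand-in for FreePages(); a real implementation would call the OS.
  static void ReleaseToOS(void* /*ptr*/, size_t /*size*/) {}
};

std::mutex g_partition_lock;  // Stand-in for the per-root lock.

void FreeDemo(void* ptr, bool was_direct_mapped, size_t map_size) {
  DeferredUnmapDemo deferred;
  {
    std::lock_guard<std::mutex> guard(g_partition_lock);
    // Freelist and metadata updates happen here, under the lock. A direct
    // mapping is only *recorded* for unmapping, not released yet.
    if (was_direct_mapped)
      deferred = {ptr, map_size};
  }
  deferred.Run();  // The system call, if any, runs outside the lock.
}
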
+template <bool thread_safe> struct PartitionPage { PartitionFreelistEntry* freelist_head; - PartitionPage* next_page; - PartitionBucket* bucket; + PartitionPage<thread_safe>* next_page; + PartitionBucket<thread_safe>* bucket; // Deliberately signed, 0 for empty or decommitted page, -n for full pages: int16_t num_allocated_slots; uint16_t num_unprovisioned_slots; @@ -80,8 +81,8 @@ struct PartitionPage { BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT; ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT; - void Decommit(PartitionRootBase* root); - void DecommitIfPossible(PartitionRootBase* root); + void Decommit(PartitionRootBase<thread_safe>* root); + void DecommitIfPossible(PartitionRootBase<thread_safe>* root); // Pointer manipulation functions. These must be static as the input |page| // pointer may be the result of an offset calculation and therefore cannot @@ -128,7 +129,7 @@ struct PartitionPage { // namespace so the getter can be fully inlined. static PartitionPage sentinel_page_; }; -static_assert(sizeof(PartitionPage) <= kPageMetadataSize, +static_assert(sizeof(PartitionPage<ThreadSafe>) <= kPageMetadataSize, "PartitionPage must be able to fit in a metadata slot"); ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) { @@ -139,8 +140,9 @@ ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) { return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize); } -ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck( - void* ptr) { +template <bool thread_safe> +ALWAYS_INLINE PartitionPage<thread_safe>* +PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) { uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr); char* super_page_ptr = reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask); @@ -150,7 +152,7 @@ ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck( // the last index is invalid because it is a guard page. DCHECK(partition_page_index); DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); - PartitionPage* page = reinterpret_cast<PartitionPage*>( + auto* page = reinterpret_cast<PartitionPage*>( PartitionSuperPageToMetadataArea(super_page_ptr) + (partition_page_index << kPageMetadataShift)); // Partition pages in the same slot span can share the same page object. @@ -161,8 +163,10 @@ ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck( return page; } -// Resturns start of the slot span for the PartitionPage. -ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) { +// Returns: start of the slot span for the PartitionPage. +template <bool thread_safe> +ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer( + const PartitionPage<thread_safe>* page) { uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page); uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask); @@ -186,7 +190,9 @@ ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) { return ret; } -ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) { +template <bool thread_safe> +ALWAYS_INLINE PartitionPage<thread_safe>* +PartitionPage<thread_safe>::FromPointer(void* ptr) { PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr); // Checks that the pointer is a multiple of bucket size. 
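FromPointerNoAlignmentCheck() above recovers the metadata object for an allocation with nothing but mask-and-shift arithmetic on the pointer (ignoring the extra adjustment for partition pages that share a slot span's metadata object). As a rough illustration, with made-up constants standing in for the values defined in partition_alloc_constants.h: 2 MiB super pages, 16 KiB partition pages, one system page of metadata at the start of every super page, and 32-byte metadata slots:

#include <cstdint>

// Illustrative constants only; the real ones live in
// partition_alloc_constants.h and may differ per platform.
constexpr uintptr_t kDemoSuperPageSize = 2 * 1024 * 1024;  // 2 MiB
constexpr uintptr_t kDemoPartitionPageShift = 14;          // 16 KiB pages
constexpr uintptr_t kDemoSystemPageSize = 4096;            // metadata offset
constexpr uintptr_t kDemoMetadataShift = 5;                // 32-byte slots

// Returns the address of the metadata slot that describes |ptr|.
uintptr_t MetadataFor(uintptr_t ptr) {
  uintptr_t super_page = ptr & ~(kDemoSuperPageSize - 1);
  uintptr_t page_index =
      (ptr & (kDemoSuperPageSize - 1)) >> kDemoPartitionPageShift;
  return super_page + kDemoSystemPageSize + (page_index << kDemoMetadataShift);
}
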
DCHECK(!((reinterpret_cast<uintptr_t>(ptr) - @@ -195,7 +201,9 @@ ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) { return page; } -ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const { +template <bool thread_safe> +ALWAYS_INLINE const size_t* PartitionPage<thread_safe>::get_raw_size_ptr() + const { // For single-slot buckets which span more than one partition page, we // have some spare metadata space to store the raw allocation size. We // can use this to report better statistics. @@ -209,15 +217,19 @@ ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const { return reinterpret_cast<const size_t*>(&the_next_page->freelist_head); } -ALWAYS_INLINE size_t PartitionPage::get_raw_size() const { +template <bool thread_safe> +ALWAYS_INLINE size_t PartitionPage<thread_safe>::get_raw_size() const { const size_t* ptr = get_raw_size_ptr(); if (UNLIKELY(ptr != nullptr)) return *ptr; return 0; } -ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) { +template <bool thread_safe> +ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) { #if DCHECK_IS_ON() + PartitionRootBase<thread_safe>::FromPage(this)->lock_.AssertAcquired(); + size_t slot_size = bucket->slot_size; const size_t raw_size = get_raw_size(); if (raw_size) { @@ -238,8 +250,7 @@ ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) { // Look for double free one level deeper in debug. DCHECK(!freelist_head || ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next)); - internal::PartitionFreelistEntry* entry = - static_cast<internal::PartitionFreelistEntry*>(ptr); + auto* entry = static_cast<internal::PartitionFreelistEntry*>(ptr); entry->next = internal::PartitionFreelistEntry::Encode(freelist_head); freelist_head = entry; --num_allocated_slots; @@ -253,14 +264,16 @@ ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) { return {}; } -ALWAYS_INLINE bool PartitionPage::is_active() const { +template <bool thread_safe> +ALWAYS_INLINE bool PartitionPage<thread_safe>::is_active() const { DCHECK(this != get_sentinel_page()); DCHECK(!page_offset); return (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots)); } -ALWAYS_INLINE bool PartitionPage::is_full() const { +template <bool thread_safe> +ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const { DCHECK(this != get_sentinel_page()); DCHECK(!page_offset); bool ret = (num_allocated_slots == bucket->get_slots_per_span()); @@ -271,13 +284,15 @@ ALWAYS_INLINE bool PartitionPage::is_full() const { return ret; } -ALWAYS_INLINE bool PartitionPage::is_empty() const { +template <bool thread_safe> +ALWAYS_INLINE bool PartitionPage<thread_safe>::is_empty() const { DCHECK(this != get_sentinel_page()); DCHECK(!page_offset); return (!num_allocated_slots && freelist_head); } -ALWAYS_INLINE bool PartitionPage::is_decommitted() const { +template <bool thread_safe> +ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const { DCHECK(this != get_sentinel_page()); DCHECK(!page_offset); bool ret = (!num_allocated_slots && !freelist_head); @@ -288,13 +303,15 @@ ALWAYS_INLINE bool PartitionPage::is_decommitted() const { return ret; } -ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) { +template <bool thread_safe> +ALWAYS_INLINE void PartitionPage<thread_safe>::set_raw_size(size_t size) { size_t* raw_size_ptr = get_raw_size_ptr(); if (UNLIKELY(raw_size_ptr != nullptr)) *raw_size_ptr = size; } -ALWAYS_INLINE void PartitionPage::Reset() { +template <bool thread_safe> +ALWAYS_INLINE void 
PartitionPage<thread_safe>::Reset() { DCHECK(is_decommitted()); num_unprovisioned_slots = bucket->get_slots_per_span(); diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.cc b/chromium/base/allocator/partition_allocator/partition_root_base.cc index 6e1442f7b79..c55a166b415 100644 --- a/chromium/base/allocator/partition_allocator/partition_root_base.cc +++ b/chromium/base/allocator/partition_allocator/partition_root_base.cc @@ -12,7 +12,8 @@ namespace base { namespace internal { -NOINLINE void PartitionRootBase::OutOfMemory(size_t size) { +template <bool thread_safety> +NOINLINE void PartitionRootBase<thread_safety>::OutOfMemory(size_t size) { #if !defined(ARCH_CPU_64_BITS) // Check whether this OOM is due to a lot of super pages that are allocated // but not committed, probably due to http://crbug.com/421387. @@ -22,19 +23,28 @@ NOINLINE void PartitionRootBase::OutOfMemory(size_t size) { PartitionOutOfMemoryWithLotsOfUncommitedPages(size); } #endif - if (PartitionRootBase::g_oom_handling_function) - (*PartitionRootBase::g_oom_handling_function)(size); + if (g_oom_handling_function) + (*g_oom_handling_function)(size); OOM_CRASH(size); } -void PartitionRootBase::DecommitEmptyPages() { +template <bool thread_safe> +void PartitionRootBase<thread_safe>::DecommitEmptyPages() { for (size_t i = 0; i < kMaxFreeableSpans; ++i) { - internal::PartitionPage* page = global_empty_page_ring[i]; + Page* page = global_empty_page_ring[i]; if (page) page->DecommitIfPossible(this); global_empty_page_ring[i] = nullptr; } } +template <bool thread_safe> +internal::PartitionRootBase<thread_safe>::PartitionRootBase() = default; +template <bool thread_safe> +internal::PartitionRootBase<thread_safe>::~PartitionRootBase() = default; + +template struct PartitionRootBase<ThreadSafe>; +template struct PartitionRootBase<NotThreadSafe>; + } // namespace internal } // namespace base diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.h b/chromium/base/allocator/partition_allocator/partition_root_base.h index 42c1d8d787d..de9551c71d5 100644 --- a/chromium/base/allocator/partition_allocator/partition_root_base.h +++ b/chromium/base/allocator/partition_allocator/partition_root_base.h @@ -7,37 +7,176 @@ #include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h" +#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_page.h" +#include "base/allocator/partition_allocator/spin_lock.h" #include "base/logging.h" +#include "base/no_destructor.h" +#include "base/synchronization/lock.h" +#include "base/thread_annotations.h" #include "build/build_config.h" namespace base { typedef void (*OomFunction)(size_t); +// PartitionAlloc supports setting hooks to observe allocations/frees as they +// occur as well as 'override' hooks that allow overriding those operations. +class BASE_EXPORT PartitionAllocHooks { + public: + // Log allocation and free events. + typedef void AllocationObserverHook(void* address, + size_t size, + const char* type_name); + typedef void FreeObserverHook(void* address); + + // If it returns true, the allocation has been overridden with the pointer in + // *out. 
+ typedef bool AllocationOverrideHook(void** out, + int flags, + size_t size, + const char* type_name); + // If it returns true, then the allocation was overridden and has been freed. + typedef bool FreeOverrideHook(void* address); + // If it returns true, the underlying allocation is overridden and *out holds + // the size of the underlying allocation. + typedef bool ReallocOverrideHook(size_t* out, void* address); + + // To unhook, call Set*Hooks with nullptrs. + static void SetObserverHooks(AllocationObserverHook* alloc_hook, + FreeObserverHook* free_hook); + static void SetOverrideHooks(AllocationOverrideHook* alloc_hook, + FreeOverrideHook* free_hook, + ReallocOverrideHook realloc_hook); + + // Helper method to check whether hooks are enabled. This is an optimization + // so that if a function needs to call observer and override hooks in two + // different places this value can be cached and only loaded once. + static bool AreHooksEnabled() { + return hooks_enabled_.load(std::memory_order_relaxed); + } + + static void AllocationObserverHookIfEnabled(void* address, + size_t size, + const char* type_name); + static bool AllocationOverrideHookIfEnabled(void** out, + int flags, + size_t size, + const char* type_name); + + static void FreeObserverHookIfEnabled(void* address); + static bool FreeOverrideHookIfEnabled(void* address); + + static void ReallocObserverHookIfEnabled(void* old_address, + void* new_address, + size_t size, + const char* type_name); + static bool ReallocOverrideHookIfEnabled(size_t* out, void* address); + + private: + // Single bool that is used to indicate whether observer or allocation hooks + // are set to reduce the numbers of loads required to check whether hooking is + // enabled. + static std::atomic<bool> hooks_enabled_; + + // Lock used to synchronize Set*Hooks calls. + static std::atomic<AllocationObserverHook*> allocation_observer_hook_; + static std::atomic<FreeObserverHook*> free_observer_hook_; + + static std::atomic<AllocationOverrideHook*> allocation_override_hook_; + static std::atomic<FreeOverrideHook*> free_override_hook_; + static std::atomic<ReallocOverrideHook*> realloc_override_hook_; +}; + namespace internal { -struct PartitionPage; -struct PartitionRootBase; +template <bool thread_safe> +class LOCKABLE MaybeSpinLock { + public: + void Lock() EXCLUSIVE_LOCK_FUNCTION() {} + void Unlock() UNLOCK_FUNCTION() {} + void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {} +}; + +template <bool thread_safe> +class SCOPED_LOCKABLE ScopedGuard { + public: + explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock) + EXCLUSIVE_LOCK_FUNCTION(lock) + : lock_(lock) { + lock_.Lock(); + } + ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); } + + private: + MaybeSpinLock<thread_safe>& lock_; +}; + +#if DCHECK_IS_ON() +template <> +class LOCKABLE MaybeSpinLock<ThreadSafe> { + public: + MaybeSpinLock() : lock_() {} + void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_->Acquire(); } + void Unlock() UNLOCK_FUNCTION() { lock_->Release(); } + void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() { + lock_->AssertAcquired(); + } + + private: + // NoDestructor to avoid issues with the "static destruction order fiasco". + // + // This also means that for DCHECK_IS_ON() builds we leak a lock when a + // partition is destructed. This will in practice only show in some tests, as + // partitons are not destructed in regular use. In addition, on most + // platforms, base::Lock doesn't allocate memory and neither does the OS + // library, and the destructor is a no-op. 
+ base::NoDestructor<base::Lock> lock_; +}; + +#else +template <> +class LOCKABLE MaybeSpinLock<ThreadSafe> { + public: + void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_.lock(); } + void Unlock() UNLOCK_FUNCTION() { lock_.unlock(); } + void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() { + // Not supported by subtle::SpinLock. + } + + private: + subtle::SpinLock lock_; +}; +#endif // DCHECK_IS_ON() // An "extent" is a span of consecutive superpages. We link to the partition's // next extent (if there is one) to the very start of a superpage's metadata // area. +template <bool thread_safety> struct PartitionSuperPageExtentEntry { - PartitionRootBase* root; + PartitionRootBase<thread_safety>* root; char* super_page_base; char* super_pages_end; - PartitionSuperPageExtentEntry* next; + PartitionSuperPageExtentEntry<thread_safety>* next; }; static_assert( - sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize, + sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize, "PartitionSuperPageExtentEntry must be able to fit in a metadata slot"); +// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory. +static OomFunction g_oom_handling_function = nullptr; + +template <bool thread_safety> struct BASE_EXPORT PartitionRootBase { + using Page = PartitionPage<thread_safety>; + using Bucket = PartitionBucket<thread_safety>; + using ScopedGuard = internal::ScopedGuard<thread_safety>; + PartitionRootBase(); virtual ~PartitionRootBase(); + MaybeSpinLock<thread_safety> lock_; size_t total_size_of_committed_pages = 0; size_t total_size_of_super_pages = 0; size_t total_size_of_direct_mapped_pages = 0; @@ -50,10 +189,10 @@ struct BASE_EXPORT PartitionRootBase { char* next_super_page = nullptr; char* next_partition_page = nullptr; char* next_partition_page_end = nullptr; - PartitionSuperPageExtentEntry* current_extent = nullptr; - PartitionSuperPageExtentEntry* first_extent = nullptr; - PartitionDirectMapExtent* direct_map_list = nullptr; - PartitionPage* global_empty_page_ring[kMaxFreeableSpans] = {}; + PartitionSuperPageExtentEntry<thread_safety>* current_extent = nullptr; + PartitionSuperPageExtentEntry<thread_safety>* first_extent = nullptr; + PartitionDirectMapExtent<thread_safety>* direct_map_list = nullptr; + Page* global_empty_page_ring[kMaxFreeableSpans] = {}; int16_t global_empty_page_ring_index = 0; uintptr_t inverted_self = 0; @@ -69,42 +208,46 @@ struct BASE_EXPORT PartitionRootBase { // preserves the layering of the includes. // // Note the matching Free() functions are in PartitionPage. - ALWAYS_INLINE void* AllocFromBucket(PartitionBucket* bucket, - int flags, - size_t size); + ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket, int flags, size_t size) + EXCLUSIVE_LOCKS_REQUIRED(lock_); + ALWAYS_INLINE void Free(void* ptr); - ALWAYS_INLINE static bool IsValidPage(PartitionPage* page); - ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page); - - // g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory. 
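The MaybeSpinLock<thread_safe> / ScopedGuard<thread_safe> pair defined above is what keeps the new template parameter free of cost for single-threaded partitions: the primary template is an empty no-op "lock", and only the ThreadSafe specialization carries a real lock (a base::Lock wrapped in NoDestructor when DCHECK_IS_ON(), so AssertAcquired() works, and a subtle::SpinLock otherwise). The LOCKABLE / SCOPED_LOCKABLE / EXCLUSIVE_LOCKS_REQUIRED annotations additionally let Clang's thread-safety analysis verify that functions such as SlowPathAlloc() and DecommitEmptyPages() are only reached with lock_ held. A stripped-down sketch of the same specialization idea, using standard-library types instead of the Chromium classes:

#include <mutex>

// Primary template: every operation compiles away in the
// not-thread-safe configuration.
template <bool thread_safe>
class MaybeLockDemo {
 public:
  void Lock() {}
  void Unlock() {}
};

// Specialization: the thread-safe flavor wraps a real mutex.
template <>
class MaybeLockDemo<true> {
 public:
  void Lock() { mu_.lock(); }
  void Unlock() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

// RAII guard matching ScopedGuard; works with either flavor.
template <bool thread_safe>
class ScopedGuardDemo {
 public:
  explicit ScopedGuardDemo(MaybeLockDemo<thread_safe>& lock) : lock_(lock) {
    lock_.Lock();
  }
  ~ScopedGuardDemo() { lock_.Unlock(); }

 private:
  MaybeLockDemo<thread_safe>& lock_;
};
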
- static OomFunction g_oom_handling_function; - NOINLINE void OutOfMemory(size_t size); + ALWAYS_INLINE static bool IsValidPage(Page* page); + ALWAYS_INLINE static PartitionRootBase* FromPage(Page* page); ALWAYS_INLINE void IncreaseCommittedPages(size_t len); ALWAYS_INLINE void DecreaseCommittedPages(size_t len); - ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length); - ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length); + ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length) + EXCLUSIVE_LOCKS_REQUIRED(lock_); + ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length) + EXCLUSIVE_LOCKS_REQUIRED(lock_); // Frees memory from this partition, if possible, by decommitting pages. // |flags| is an OR of base::PartitionPurgeFlags. virtual void PurgeMemory(int flags) = 0; - void DecommitEmptyPages(); + NOINLINE void OutOfMemory(size_t size); + + protected: + void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_); }; -ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket, - int flags, - size_t size) { +template <bool thread_safety> +ALWAYS_INLINE void* PartitionRootBase<thread_safety>::AllocFromBucket( + Bucket* bucket, + int flags, + size_t size) { bool zero_fill = flags & PartitionAllocZeroFill; bool is_already_zeroed = false; - PartitionPage* page = bucket->active_pages_head; + Page* page = bucket->active_pages_head; // Check that this page is neither full nor freed. + DCHECK(page); DCHECK(page->num_allocated_slots >= 0); void* ret = page->freelist_head; if (LIKELY(ret != 0)) { // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See // if we can afford to make these CHECKs. - DCHECK(PartitionRootBase::IsValidPage(page)); + DCHECK(IsValidPage(page)); // All large allocations must go through the slow path to correctly update // the size metadata. @@ -117,8 +260,7 @@ ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket, } else { ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed); // TODO(palmer): See if we can afford to make this a CHECK. - DCHECK(!ret || - PartitionRootBase::IsValidPage(PartitionPage::FromPointer(ret))); + DCHECK(!ret || IsValidPage(Page::FromPointer(ret))); } #if DCHECK_IS_ON() @@ -126,7 +268,7 @@ ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket, return nullptr; } - page = PartitionPage::FromPointer(ret); + page = Page::FromPointer(ret); // TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just // be bucket->slot_size? size_t new_slot_size = page->bucket->slot_size; @@ -157,39 +299,78 @@ ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket, return ret; } -ALWAYS_INLINE bool PartitionRootBase::IsValidPage(PartitionPage* page) { +template <bool thread_safety> +ALWAYS_INLINE void PartitionRootBase<thread_safety>::Free(void* ptr) { +#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) + free(ptr); +#else + DCHECK(initialized); + + if (UNLIKELY(!ptr)) + return; + + if (PartitionAllocHooks::AreHooksEnabled()) { + PartitionAllocHooks::FreeObserverHookIfEnabled(ptr); + if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr)) + return; + } + + ptr = internal::PartitionCookieFreePointerAdjust(ptr); + Page* page = Page::FromPointer(ptr); + // TODO(palmer): See if we can afford to make this a CHECK. 
+ DCHECK(IsValidPage(page)); + internal::DeferredUnmap deferred_unmap; + { + ScopedGuard guard{lock_}; + deferred_unmap = page->Free(ptr); + } + deferred_unmap.Run(); +#endif +} + +template <bool thread_safety> +ALWAYS_INLINE bool PartitionRootBase<thread_safety>::IsValidPage(Page* page) { PartitionRootBase* root = PartitionRootBase::FromPage(page); return root->inverted_self == ~reinterpret_cast<uintptr_t>(root); } -ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage( - PartitionPage* page) { - PartitionSuperPageExtentEntry* extent_entry = - reinterpret_cast<PartitionSuperPageExtentEntry*>( +template <bool thread_safety> +ALWAYS_INLINE PartitionRootBase<thread_safety>* +PartitionRootBase<thread_safety>::FromPage(Page* page) { + auto* extent_entry = + reinterpret_cast<PartitionSuperPageExtentEntry<thread_safety>*>( reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask); return extent_entry->root; } -ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) { +template <bool thread_safety> +ALWAYS_INLINE void PartitionRootBase<thread_safety>::IncreaseCommittedPages( + size_t len) { total_size_of_committed_pages += len; DCHECK(total_size_of_committed_pages <= total_size_of_super_pages + total_size_of_direct_mapped_pages); } -ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) { +template <bool thread_safety> +ALWAYS_INLINE void PartitionRootBase<thread_safety>::DecreaseCommittedPages( + size_t len) { total_size_of_committed_pages -= len; DCHECK(total_size_of_committed_pages <= total_size_of_super_pages + total_size_of_direct_mapped_pages); } -ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address, - size_t length) { +template <bool thread_safety> +ALWAYS_INLINE void PartitionRootBase<thread_safety>::DecommitSystemPages( + void* address, + size_t length) { ::base::DecommitSystemPages(address, length); DecreaseCommittedPages(length); } -ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address, - size_t length) { +template <bool thread_safety> +ALWAYS_INLINE void PartitionRootBase<thread_safety>::RecommitSystemPages( + void* address, + size_t length) { CHECK(::base::RecommitSystemPages(address, length, PageReadWrite)); IncreaseCommittedPages(length); } diff --git a/chromium/base/allocator/partition_allocator/random.cc b/chromium/base/allocator/partition_allocator/random.cc index 7da12ddb1ed..14ecc168a5f 100644 --- a/chromium/base/allocator/partition_allocator/random.cc +++ b/chromium/base/allocator/partition_allocator/random.cc @@ -5,7 +5,6 @@ #include "base/allocator/partition_allocator/random.h" #include "base/allocator/partition_allocator/spin_lock.h" -#include "base/logging.h" #include "base/no_destructor.h" #include "base/rand_util.h" #include "base/synchronization/lock.h" diff --git a/chromium/base/allocator/tcmalloc_unittest.cc b/chromium/base/allocator/tcmalloc_unittest.cc index bd3ab177379..7a0a0c2808d 100644 --- a/chromium/base/allocator/tcmalloc_unittest.cc +++ b/chromium/base/allocator/tcmalloc_unittest.cc @@ -6,8 +6,8 @@ #include <stdio.h> #include "base/allocator/buildflags.h" +#include "base/check_op.h" #include "base/compiler_specific.h" -#include "base/logging.h" #include "base/process/process_metrics.h" #include "base/system/sys_info.h" #include "build/build_config.h" diff --git a/chromium/base/allocator/winheap_stubs_win.cc b/chromium/base/allocator/winheap_stubs_win.cc index 2ba8fe96aa5..cc08cf94b9f 100644 --- a/chromium/base/allocator/winheap_stubs_win.cc +++ 
b/chromium/base/allocator/winheap_stubs_win.cc @@ -16,7 +16,7 @@ #include <limits> #include "base/bits.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace allocator { diff --git a/chromium/base/allocator/winheap_stubs_win_unittest.cc b/chromium/base/allocator/winheap_stubs_win_unittest.cc index 369bc1b932a..cb641d746b2 100644 --- a/chromium/base/allocator/winheap_stubs_win_unittest.cc +++ b/chromium/base/allocator/winheap_stubs_win_unittest.cc @@ -5,7 +5,7 @@ #include "base/allocator/winheap_stubs_win.h" #include "base/bits.h" -#include "base/logging.h" +#include "base/check.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { diff --git a/chromium/base/android/jni_generator/BUILD.gn b/chromium/base/android/jni_generator/BUILD.gn index 8e95c35286c..3959bcba137 100644 --- a/chromium/base/android/jni_generator/BUILD.gn +++ b/chromium/base/android/jni_generator/BUILD.gn @@ -82,12 +82,6 @@ group("jni_generator_tests") { ] } -java_cpp_template("processor_args_java") { - package_path = "org/chromium/jni_generator" - sources = [ "ProcessorArgs.template" ] - defines = [ "HASH_JNI_NAMES_VALUE=$use_hashed_jni_names" ] -} - java_annotation_processor("jni_processor") { sources = [ "java/src/org/chromium/jni_generator/JniProcessor.java", @@ -110,6 +104,4 @@ java_annotation_processor("jni_processor") { "//third_party/android_deps:com_squareup_javapoet_java", "//third_party/android_deps:javax_annotation_jsr250_api_java", ] - - srcjar_deps = [ ":processor_args_java" ] } diff --git a/chromium/base/android/resource_exclusions.gni b/chromium/base/android/resource_exclusions.gni new file mode 100644 index 00000000000..f5b8d140241 --- /dev/null +++ b/chromium/base/android/resource_exclusions.gni @@ -0,0 +1,15 @@ +# Copyright 2020 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Strip xxxhdpi images to save size. It's extremely difficult to see the +# difference without magnification. https://crbug.com/691733. +common_resource_exclusion_regex = "drawable[^/]*-xxxhdpi" + +common_resource_exclusion_exceptions = [ + "*shadow*", # Combination of gradient & transparency cause pixelation. + "*.9.*", # Most nine-patches contain shadows. +] + +# Remove WearOS resources (a couple exist in appcompat). +common_resource_exclusion_regex += "|-watch\b" diff --git a/chromium/base/at_exit.cc b/chromium/base/at_exit.cc index eb7d26cdc7c..698d9f319ad 100644 --- a/chromium/base/at_exit.cc +++ b/chromium/base/at_exit.cc @@ -10,7 +10,8 @@ #include "base/bind.h" #include "base/callback.h" -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" namespace base { diff --git a/chromium/base/base64_encode_fuzzer.cc b/chromium/base/base64_encode_fuzzer.cc index 57086efb843..4e577104f43 100644 --- a/chromium/base/base64_encode_fuzzer.cc +++ b/chromium/base/base64_encode_fuzzer.cc @@ -5,7 +5,7 @@ #include <string> #include "base/base64.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/strings/string_piece.h" // Encode some random data, and then decode it. 
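Much of the remaining churn in this change is mechanical include cleanup: files that only use the assertion macros move from the heavyweight "base/logging.h" to the narrower "base/check.h", "base/check_op.h" or "base/notreached.h", while files that actually log (base_paths_fuchsia.cc below) gain an explicit logging include rather than relying on a transitive one. Usage is unchanged; roughly, a translation unit that only asserts now needs no more than this (a sketch assuming the usual macro-to-header split):

#include "base/check.h"       // CHECK(), DCHECK()
#include "base/check_op.h"    // CHECK_EQ(), DCHECK_NE(), ...
#include "base/notreached.h"  // NOTREACHED()

int Sign(int value) {
  DCHECK_NE(value, 0);  // From check_op.h.
  if (value > 0)
    return 1;
  if (value < 0)
    return -1;
  NOTREACHED();  // From notreached.h; unreachable given the DCHECK above.
  return 0;
}
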
diff --git a/chromium/base/base_paths_android.cc b/chromium/base/base_paths_android.cc index 078f565a5ca..e82af0e65a4 100644 --- a/chromium/base/base_paths_android.cc +++ b/chromium/base/base_paths_android.cc @@ -13,7 +13,7 @@ #include "base/base_paths.h" #include "base/files/file_path.h" #include "base/files/file_util.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/process/process_metrics.h" namespace base { diff --git a/chromium/base/base_paths_fuchsia.cc b/chromium/base/base_paths_fuchsia.cc index fe12f3617a3..0d948b3aace 100644 --- a/chromium/base/base_paths_fuchsia.cc +++ b/chromium/base/base_paths_fuchsia.cc @@ -10,6 +10,7 @@ #include "base/command_line.h" #include "base/files/file_util.h" #include "base/fuchsia/file_utils.h" +#include "base/logging.h" #include "base/path_service.h" #include "base/process/process.h" diff --git a/chromium/base/base_paths_mac.mm b/chromium/base/base_paths_mac.mm index f10b2c3121e..fbcfde36c80 100644 --- a/chromium/base/base_paths_mac.mm +++ b/chromium/base/base_paths_mac.mm @@ -11,12 +11,13 @@ #include <stdint.h> #include "base/base_paths.h" +#include "base/check_op.h" #include "base/compiler_specific.h" #include "base/files/file_path.h" #include "base/files/file_util.h" -#include "base/logging.h" #include "base/mac/bundle_locations.h" #include "base/mac/foundation_util.h" +#include "base/notreached.h" #include "base/path_service.h" #include "base/strings/string_util.h" #include "base/threading/thread_restrictions.h" @@ -82,6 +83,11 @@ bool PathProviderMac(int key, base::FilePath* result) { return success; } case base::DIR_SOURCE_ROOT: +#if defined(OS_IOS) + // On iOS, there is no access to source root, however, the necessary + // resources are packaged into the test as assets. + return PathService::Get(base::DIR_ASSETS, result); +#else // Go through PathService to catch overrides. if (!PathService::Get(base::FILE_EXE, result)) return false; @@ -89,7 +95,6 @@ bool PathProviderMac(int key, base::FilePath* result) { // Start with the executable's directory. *result = result->DirName(); -#if !defined(OS_IOS) if (base::mac::AmIBundled()) { // The bundled app executables (Chromium, TestShell, etc) live five // levels down, eg: @@ -100,7 +105,7 @@ bool PathProviderMac(int key, base::FilePath* result) { // src/xcodebuild/{Debug|Release}/base_unittests *result = result->DirName().DirName(); } -#endif +#endif // !defined(OS_IOS) return true; case base::DIR_USER_DESKTOP: #if defined(OS_IOS) @@ -112,7 +117,7 @@ bool PathProviderMac(int key, base::FilePath* result) { #endif case base::DIR_ASSETS: #if defined(OS_IOS) - // TODO(https://crbug.com/957792): Assets live alongside the executable. + // On iOS, the assets are located next to the module binary. return PathService::Get(base::DIR_MODULE, result); #else if (!base::mac::AmIBundled()) { diff --git a/chromium/base/big_endian.cc b/chromium/base/big_endian.cc index 9e9e672e490..a706c4a4ecc 100644 --- a/chromium/base/big_endian.cc +++ b/chromium/base/big_endian.cc @@ -4,6 +4,8 @@ #include "base/big_endian.h" +#include <string.h> + #include "base/numerics/checked_math.h" #include "base/strings/string_piece.h" diff --git a/chromium/base/bind.h b/chromium/base/bind.h index 5884c62ae40..159ec8b92fa 100644 --- a/chromium/base/bind.h +++ b/chromium/base/bind.h @@ -276,24 +276,30 @@ Bind(Functor&& functor, Args&&... args) { } // Special cases for binding to a base::Callback without extra bound arguments. 
+// We CHECK() the validity of callback to guard against null pointers +// accidentally ending up in posted tasks, causing hard-to-debug crashes. template <typename Signature> OnceCallback<Signature> BindOnce(OnceCallback<Signature> callback) { + CHECK(callback); return callback; } template <typename Signature> OnceCallback<Signature> BindOnce(RepeatingCallback<Signature> callback) { + CHECK(callback); return callback; } template <typename Signature> RepeatingCallback<Signature> BindRepeating( RepeatingCallback<Signature> callback) { + CHECK(callback); return callback; } template <typename Signature> Callback<Signature> Bind(Callback<Signature> callback) { + CHECK(callback); return callback; } diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h index cb56382055f..d9a6a23a5c2 100644 --- a/chromium/base/bind_internal.h +++ b/chromium/base/bind_internal.h @@ -16,6 +16,7 @@ #include "base/bind.h" #include "base/callback_internal.h" #include "base/compiler_specific.h" +#include "base/logging.h" #include "base/memory/raw_scoped_refptr_mismatch_checker.h" #include "base/memory/weak_ptr.h" #include "base/template_util.h" @@ -369,6 +370,7 @@ struct FunctorTraits<Functor, using RunType = ExtractCallableRunType<Functor>; static constexpr bool is_method = false; static constexpr bool is_nullable = false; + static constexpr bool is_callback = false; template <typename RunFunctor, typename... RunArgs> static ExtractReturnType<RunType> Invoke(RunFunctor&& functor, @@ -383,6 +385,7 @@ struct FunctorTraits<R (*)(Args...)> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename Function, typename... RunArgs> static R Invoke(Function&& function, RunArgs&&... args) { @@ -398,6 +401,7 @@ struct FunctorTraits<R(__stdcall*)(Args...)> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename... RunArgs> static R Invoke(R(__stdcall* function)(Args...), RunArgs&&... args) { @@ -411,6 +415,7 @@ struct FunctorTraits<R(__fastcall*)(Args...)> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename... RunArgs> static R Invoke(R(__fastcall* function)(Args...), RunArgs&&... args) { @@ -440,6 +445,7 @@ struct FunctorTraits<R (^)(Args...)> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename BlockType, typename... RunArgs> static R Invoke(BlockType&& block, RunArgs&&... args) { @@ -461,6 +467,7 @@ struct FunctorTraits<base::mac::ScopedBlock<R (^)(Args...)>> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename BlockType, typename... RunArgs> static R Invoke(BlockType&& block, RunArgs&&... args) { @@ -481,6 +488,7 @@ struct FunctorTraits<R (Receiver::*)(Args...)> { using RunType = R(Receiver*, Args...); static constexpr bool is_method = true; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename Method, typename ReceiverPtr, typename... RunArgs> static R Invoke(Method method, @@ -496,6 +504,7 @@ struct FunctorTraits<R (Receiver::*)(Args...) 
const> { using RunType = R(const Receiver*, Args...); static constexpr bool is_method = true; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename Method, typename ReceiverPtr, typename... RunArgs> static R Invoke(Method method, @@ -513,6 +522,7 @@ struct FunctorTraits<R (__stdcall Receiver::*)(Args...)> { using RunType = R(Receiver*, Args...); static constexpr bool is_method = true; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename Method, typename ReceiverPtr, typename... RunArgs> static R Invoke(Method method, @@ -528,6 +538,7 @@ struct FunctorTraits<R (__stdcall Receiver::*)(Args...) const> { using RunType = R(const Receiver*, Args...); static constexpr bool is_method = true; static constexpr bool is_nullable = true; + static constexpr bool is_callback = false; template <typename Method, typename ReceiverPtr, typename... RunArgs> static R Invoke(Method method, @@ -577,6 +588,7 @@ struct FunctorTraits<OnceCallback<R(Args...)>> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = true; template <typename CallbackType, typename... RunArgs> static R Invoke(CallbackType&& callback, RunArgs&&... args) { @@ -592,6 +604,7 @@ struct FunctorTraits<RepeatingCallback<R(Args...)>> { using RunType = R(Args...); static constexpr bool is_method = false; static constexpr bool is_nullable = true; + static constexpr bool is_callback = true; template <typename CallbackType, typename... RunArgs> static R Invoke(CallbackType&& callback, RunArgs&&... args) { @@ -829,7 +842,6 @@ struct BindState final : BindStateBase { using IsCancellable = bool_constant< CallbackCancellationTraits<Functor, std::tuple<BoundArgs...>>::is_cancellable>; - template <typename ForwardFunctor, typename... ForwardBoundArgs> static BindState* Create(BindStateBase::InvokeFuncStorage invoke_func, ForwardFunctor&& functor, @@ -850,6 +862,9 @@ struct BindState final : BindStateBase { std::tuple<BoundArgs...> bound_args_; private: + static constexpr bool is_nested_callback = + internal::MakeFunctorTraits<Functor>::is_callback; + template <typename ForwardFunctor, typename... ForwardBoundArgs> explicit BindState(std::true_type, BindStateBase::InvokeFuncStorage invoke_func, @@ -860,7 +875,15 @@ struct BindState final : BindStateBase { &QueryCancellationTraits<BindState>), functor_(std::forward<ForwardFunctor>(functor)), bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) { - DCHECK(!IsNull(functor_)); + // We check the validity of nested callbacks (e.g., Bind(callback, ...)) in + // release builds to avoid null pointers from ending up in posted tasks, + // causing hard-to-diagnose crashes. Ideally we'd do this for all functors + // here, but that would have a large binary size impact. + if (is_nested_callback) { + CHECK(!IsNull(functor_)); + } else { + DCHECK(!IsNull(functor_)); + } } template <typename ForwardFunctor, typename... ForwardBoundArgs> @@ -871,7 +894,12 @@ struct BindState final : BindStateBase { : BindStateBase(invoke_func, &Destroy), functor_(std::forward<ForwardFunctor>(functor)), bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) { - DCHECK(!IsNull(functor_)); + // See above for CHECK/DCHECK rationale. 
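Taken together, the new is_callback flag on FunctorTraits and the CHECK/DCHECK split in the BindState constructors above mean that binding an already-null base::{Once,Repeating}Callback now crashes in release builds as well, while a null raw function pointer keeps the cheaper debug-only check; the updated death tests in bind_unittest.cc just below exercise exactly that split. In usage terms (each statement shown in isolation, since every CHECK/DCHECK failure is fatal):

#include <utility>

#include "base/bind.h"
#include "base/callback.h"

void Demo() {
  base::RepeatingCallback<void(int)> null_cb;
  // Null nested callback with bound args: CHECK-fails, even in release.
  auto a = base::BindRepeating(null_cb, 42);

  base::OnceCallback<void(int)> null_once;
  // Null callback with no extra bound args: also a CHECK, via the
  // pass-through BindOnce() overload.
  auto b = base::BindOnce(std::move(null_once));

  // Null function pointer: still only a DCHECK, to avoid paying the
  // binary-size cost of checking every functor.
  void (*null_fn)(int) = nullptr;
  auto c = base::BindRepeating(null_fn, 42);
}
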
+ if (is_nested_callback) { + CHECK(!IsNull(functor_)); + } else { + DCHECK(!IsNull(functor_)); + } } ~BindState() = default; diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc index 4d2240db9a5..36282ac11e6 100644 --- a/chromium/base/bind_unittest.cc +++ b/chromium/base/bind_unittest.cc @@ -1587,7 +1587,18 @@ TEST_F(BindTest, BindNoexcept) { TEST(BindDeathTest, NullCallback) { base::RepeatingCallback<void(int)> null_cb; ASSERT_TRUE(null_cb.is_null()); - EXPECT_DCHECK_DEATH(base::BindRepeating(null_cb, 42)); + EXPECT_CHECK_DEATH(base::BindRepeating(null_cb, 42)); +} + +TEST(BindDeathTest, NullFunctionPointer) { + void (*null_function)(int) = nullptr; + EXPECT_DCHECK_DEATH(base::BindRepeating(null_function, 42)); +} + +TEST(BindDeathTest, NullCallbackWithoutBoundArgs) { + base::OnceCallback<void(int)> null_cb; + ASSERT_TRUE(null_cb.is_null()); + EXPECT_CHECK_DEATH(base::BindOnce(std::move(null_cb))); } TEST(BindDeathTest, BanFirstOwnerOfRefCountedType) { diff --git a/chromium/base/build_time.cc b/chromium/base/build_time.cc index c7462b15fb1..447e445327d 100644 --- a/chromium/base/build_time.cc +++ b/chromium/base/build_time.cc @@ -7,7 +7,7 @@ // Imports the generated build date, i.e. BUILD_DATE. #include "base/generated_build_date.h" -#include "base/logging.h" +#include "base/check.h" #include "base/time/time.h" namespace base { diff --git a/chromium/base/callback.h b/chromium/base/callback.h index 1427faaaea9..478a353eba9 100644 --- a/chromium/base/callback.h +++ b/chromium/base/callback.h @@ -13,6 +13,7 @@ #include "base/callback_forward.h" #include "base/callback_internal.h" +#include "base/logging.h" // ----------------------------------------------------------------------------- // Usage documentation diff --git a/chromium/base/callback_helpers.h b/chromium/base/callback_helpers.h index a35fd040291..ad3fc3267fc 100644 --- a/chromium/base/callback_helpers.h +++ b/chromium/base/callback_helpers.h @@ -33,11 +33,24 @@ struct IsBaseCallbackImpl<OnceCallback<R(Args...)>> : std::true_type {}; template <typename R, typename... Args> struct IsBaseCallbackImpl<RepeatingCallback<R(Args...)>> : std::true_type {}; +template <typename T> +struct IsOnceCallbackImpl : std::false_type {}; + +template <typename R, typename... Args> +struct IsOnceCallbackImpl<OnceCallback<R(Args...)>> : std::true_type {}; + } // namespace internal +// IsBaseCallback<T>::value is true when T is any of the Closure or Callback +// family of types. template <typename T> using IsBaseCallback = internal::IsBaseCallbackImpl<std::decay_t<T>>; +// IsOnceCallback<T>::value is true when T is a OnceClosure or OnceCallback +// type. +template <typename T> +using IsOnceCallback = internal::IsOnceCallbackImpl<std::decay_t<T>>; + // SFINAE friendly enabler allowing to overload methods for both Repeating and // OnceCallbacks. 
// diff --git a/chromium/base/callback_helpers_unittest.cc b/chromium/base/callback_helpers_unittest.cc index 78c7ad0ad5d..895e6efaff9 100644 --- a/chromium/base/callback_helpers_unittest.cc +++ b/chromium/base/callback_helpers_unittest.cc @@ -5,6 +5,7 @@ #include "base/callback_helpers.h" #include <functional> +#include <type_traits> #include "base/bind.h" #include "base/callback.h" @@ -43,6 +44,45 @@ TEST(CallbackHelpersTest, IsBaseCallback) { static_assert(!base::IsBaseCallback<std::function<void()>&&>::value, ""); } +TEST(CallbackHelpersTest, IsOnceCallback) { + // Check that base::OnceClosures and references to them are considered + // base::OnceCallbacks, but base::RepeatingClosures are not. + static_assert(base::IsOnceCallback<base::OnceClosure>::value, ""); + static_assert(!base::IsOnceCallback<base::RepeatingClosure>::value, ""); + static_assert(base::IsOnceCallback<base::OnceClosure&&>::value, ""); + static_assert(!base::IsOnceCallback<const base::RepeatingClosure&>::value, + ""); + + // Check that base::OnceCallbacks with a given RunType and references to them + // are considered base::OnceCallbacks, but base::RepeatingCallbacks are not. + static_assert(base::IsOnceCallback<base::OnceCallback<int(int)>>::value, ""); + static_assert(!base::IsOnceCallback<base::RepeatingCallback<int(int)>>::value, + ""); + static_assert(base::IsOnceCallback<base::OnceCallback<int(int)>&&>::value, + ""); + static_assert( + !base::IsOnceCallback<const base::RepeatingCallback<int(int)>&>::value, + ""); + + // Check that POD types are not considered base::OnceCallbacks. + static_assert(!base::IsOnceCallback<bool>::value, ""); + static_assert(!base::IsOnceCallback<int>::value, ""); + static_assert(!base::IsOnceCallback<double>::value, ""); + + // Check that the closely related std::function is not considered a + // base::OnceCallback. + static_assert(!base::IsOnceCallback<std::function<void()>>::value, ""); + static_assert(!base::IsOnceCallback<const std::function<void()>&>::value, ""); + static_assert(!base::IsOnceCallback<std::function<void()>&&>::value, ""); + + // Check that the result of BindOnce is a OnceCallback, but not if it's + // wrapped in AdaptCallbackForRepeating. 
+ auto cb = base::BindOnce([](int* count) { ++*count; }); + static_assert(base::IsOnceCallback<decltype(cb)>::value, ""); + auto wrapped = base::AdaptCallbackForRepeating(std::move(cb)); + static_assert(!base::IsOnceCallback<decltype(wrapped)>::value, ""); +} + void Increment(int* value) { (*value)++; } diff --git a/chromium/base/callback_internal.cc b/chromium/base/callback_internal.cc index 6a185d90929..0dd5d6c3630 100644 --- a/chromium/base/callback_internal.cc +++ b/chromium/base/callback_internal.cc @@ -4,7 +4,8 @@ #include "base/callback_internal.h" -#include "base/logging.h" +#include "base/check.h" +#include "base/notreached.h" namespace base { namespace internal { diff --git a/chromium/base/callback_list.h b/chromium/base/callback_list.h index 781adbf4eb3..15cf0b291db 100644 --- a/chromium/base/callback_list.h +++ b/chromium/base/callback_list.h @@ -5,229 +5,298 @@ #ifndef BASE_CALLBACK_LIST_H_ #define BASE_CALLBACK_LIST_H_ +#include <algorithm> #include <list> #include <memory> +#include <utility> +#include "base/auto_reset.h" #include "base/bind.h" #include "base/callback.h" +#include "base/callback_helpers.h" #include "base/compiler_specific.h" #include "base/logging.h" -#include "base/macros.h" #include "base/memory/weak_ptr.h" +#include "base/stl_util.h" // OVERVIEW: // -// A container for a list of (repeating) callbacks. Unlike a normal vector or -// list, this container can be modified during iteration without invalidating -// the iterator. It safely handles the case of a callback removing itself or -// another callback from the list while callbacks are being run. +// A container for a list of callbacks. Provides callers the ability to manually +// or automatically unregister callbacks at any time, including during callback +// notification. // // TYPICAL USAGE: // // class MyWidget { // public: -// ... +// using CallbackList = base::RepeatingCallbackList<void(const Foo&)>; // -// std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription> -// RegisterCallback(const base::RepeatingCallback<void(const Foo&)>& cb) { -// return callback_list_.Add(cb); +// // Registers |cb| to be called whenever NotifyFoo() is executed. +// std::unique_ptr<CallbackList::Subscription> +// RegisterCallback(CallbackList::CallbackType cb) { +// return callback_list_.Add(std::move(cb)); // } // // private: +// // Calls all registered callbacks, with |foo| as the supplied arg. // void NotifyFoo(const Foo& foo) { -// callback_list_.Notify(foo); +// callback_list_.Notify(foo); // } // -// base::CallbackList<void(const Foo&)> callback_list_; -// -// DISALLOW_COPY_AND_ASSIGN(MyWidget); +// CallbackList callback_list_; // }; // // // class MyWidgetListener { -// public: -// MyWidgetListener::MyWidgetListener() { -// foo_subscription_ = MyWidget::GetCurrent()->RegisterCallback( -// base::BindRepeating(&MyWidgetListener::OnFoo, this))); -// } -// -// MyWidgetListener::~MyWidgetListener() { -// // Subscription gets deleted automatically and will deregister -// // the callback in the process. -// } -// // private: // void OnFoo(const Foo& foo) { -// // Do something. +// // Called whenever MyWidget::NotifyFoo() is executed, unless +// // |foo_subscription_| has been reset(). // } // -// std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription> -// foo_subscription_; -// -// DISALLOW_COPY_AND_ASSIGN(MyWidgetListener); +// // Automatically deregisters the callback when deleted (e.g. in +// // ~MyWidgetListener()). 
+// std::unique_ptr<MyWidget::CallbackList::Subscription> foo_subscription_ = +// MyWidget::Get()->RegisterCallback( +// base::BindRepeating(&MyWidgetListener::OnFoo, this)); // }; +// +// UNSUPPORTED: +// +// * Calling Notify() reentrantly during callback notification. +// * Destroying the CallbackList during callback notification. +// +// Both of these are possible to support, but not currently necessary. namespace base { +template <typename Signature> +class OnceCallbackList; + +template <typename Signature> +class RepeatingCallbackList; + namespace internal { -template <typename CallbackType> +// A traits class to break circular type dependencies between CallbackListBase +// and its subclasses. +template <typename CallbackList> +struct CallbackListTraits; + +template <typename Signature> +struct CallbackListTraits<OnceCallbackList<Signature>> { + using CallbackType = OnceCallback<Signature>; + using Callbacks = std::list<CallbackType>; +}; + +template <typename Signature> +struct CallbackListTraits<RepeatingCallbackList<Signature>> { + using CallbackType = RepeatingCallback<Signature>; + using Callbacks = std::list<CallbackType>; +}; + +template <typename CallbackListImpl> class CallbackListBase { public: + using CallbackType = + typename CallbackListTraits<CallbackListImpl>::CallbackType; + static_assert(IsBaseCallback<CallbackType>::value, ""); + + // A cancellation handle for callers who register callbacks. Subscription + // destruction cancels the associated callback and is legal any time, + // including after the destruction of the CallbackList that vends it. class Subscription { public: - explicit Subscription(base::OnceClosure subscription_destroyed) - : subscription_destroyed_(std::move(subscription_destroyed)) {} + explicit Subscription(base::OnceClosure destruction_closure) + : destruction_closure_(std::move(destruction_closure)) {} - ~Subscription() { std::move(subscription_destroyed_).Run(); } + Subscription(const Subscription&) = delete; + Subscription& operator=(const Subscription&) = delete; - // Returns true if the CallbackList associated with this subscription has - // been deleted, which means that the associated callback will no longer be - // invoked. - bool IsCancelled() const { return subscription_destroyed_.IsCancelled(); } + ~Subscription() { std::move(destruction_closure_).Run(); } private: - base::OnceClosure subscription_destroyed_; - - DISALLOW_COPY_AND_ASSIGN(Subscription); + // Run when |this| is destroyed to notify the CallbackList the associated + // callback should be canceled. Since this is bound using a WeakPtr to the + // CallbackList, it will automatically no-op if the CallbackList no longer + // exists. + base::OnceClosure destruction_closure_; }; - // Add a callback to the list. The callback will remain registered until the - // returned Subscription is destroyed. When the CallbackList is destroyed, any - // outstanding subscriptions are safely invalidated. - std::unique_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT { + CallbackListBase() = default; + CallbackListBase(const CallbackListBase&) = delete; + CallbackListBase& operator=(const CallbackListBase&) = delete; + + ~CallbackListBase() { + // Destroying the list during iteration is unsupported and will cause a UAF. + CHECK(!iterating_); + } + + // Registers |cb| for future notifications. Returns a Subscription that can be + // used to cancel |cb|. 
+ std::unique_ptr<Subscription> Add(CallbackType cb) WARN_UNUSED_RESULT { DCHECK(!cb.is_null()); - return std::make_unique<Subscription>( - base::BindOnce(&CallbackListBase::OnSubscriptionDestroyed, - weak_ptr_factory_.GetWeakPtr(), - callbacks_.insert(callbacks_.end(), cb))); + return std::make_unique<Subscription>(base::BindOnce( + &CallbackListBase::CancelCallback, weak_ptr_factory_.GetWeakPtr(), + callbacks_.insert(callbacks_.end(), std::move(cb)))); } - // Sets a callback which will be run when a subscription list is changed. - void set_removal_callback(const RepeatingClosure& callback) { - removal_callback_ = callback; + // Registers |removal_callback| to be run after elements are removed from the + // list of registered callbacks. + void set_removal_callback(const RepeatingClosure& removal_callback) { + removal_callback_ = removal_callback; } - // Returns true if there are no subscriptions. This is only valid to call when - // not looping through the list. - bool empty() { - DCHECK_EQ(0u, active_iterator_count_); + // Returns whether the list of registered callbacks is empty. This may not be + // called while Notify() is traversing the list (since the results could be + // inaccurate). + bool empty() const { + DCHECK(!iterating_); return callbacks_.empty(); } - protected: - // An iterator class that can be used to access the list of callbacks. - class Iterator { - public: - explicit Iterator(CallbackListBase<CallbackType>* list) - : list_(list), - list_iter_(list_->callbacks_.begin()) { - ++list_->active_iterator_count_; - } - - Iterator(const Iterator& iter) - : list_(iter.list_), - list_iter_(iter.list_iter_) { - ++list_->active_iterator_count_; - } - - ~Iterator() { - if (list_ && --list_->active_iterator_count_ == 0) { - list_->Compact(); - } - } - - CallbackType* GetNext() { - while ((list_iter_ != list_->callbacks_.end()) && list_iter_->is_null()) - ++list_iter_; - - CallbackType* cb = nullptr; - if (list_iter_ != list_->callbacks_.end()) { - cb = &(*list_iter_); - ++list_iter_; - } - return cb; + // Calls all registered callbacks that are not canceled beforehand. If any + // callbacks are unregistered, notifies any registered removal callback at the + // end. + template <typename... RunArgs> + void Notify(RunArgs&&... args) { + // Calling Notify() reentrantly is currently unsupported. + DCHECK(!iterating_); + + if (empty()) + return; // Nothing to do. + + // Canceled callbacks should be removed from the list whenever notification + // isn't in progress, so right now all callbacks should be valid. + const auto callback_valid = [](const auto& cb) { return !cb.is_null(); }; + DCHECK(std::all_of(callbacks_.cbegin(), callbacks_.cend(), callback_valid)); + + { + AutoReset<bool> iterating(&iterating_, true); + // Skip any callbacks that are canceled during iteration. + for (auto it = callbacks_.begin(); it != callbacks_.end(); + it = std::find_if(it, callbacks_.end(), callback_valid)) + static_cast<CallbackListImpl*>(this)->RunCallback(it++, args...); } - private: - CallbackListBase<CallbackType>* list_; - typename std::list<CallbackType>::iterator list_iter_; - }; - - CallbackListBase() = default; - - ~CallbackListBase() { DCHECK_EQ(0u, active_iterator_count_); } - - // Returns an instance of a CallbackListBase::Iterator which can be used - // to run callbacks. - Iterator GetIterator() { - return Iterator(this); + // Any null callbacks remaining in the list were canceled due to + // Subscription destruction during iteration, and can safely be erased now. 
+ const size_t erased_callbacks = + EraseIf(callbacks_, [](const auto& cb) { return cb.is_null(); }); + + // Run |removal_callback_| if any callbacks were canceled. Note that we + // cannot simply compare list sizes before and after iterating, since + // notification may result in Add()ing new callbacks as well as canceling + // them. Also note that if this is a OnceCallbackList, the OnceCallbacks + // that were executed above have all been removed regardless of whether + // they're counted in |erased_callbacks_|. + if (removal_callback_ && + (erased_callbacks || IsOnceCallback<CallbackType>::value)) + removal_callback_.Run(); // May delete |this|! } - // Compact the list: remove any entries which were nulled out during - // iteration. - void Compact() { - auto it = callbacks_.begin(); - bool updated = false; - while (it != callbacks_.end()) { - if ((*it).is_null()) { - updated = true; - it = callbacks_.erase(it); - } else { - ++it; - } - } + protected: + using Callbacks = typename CallbackListTraits<CallbackListImpl>::Callbacks; - if (updated && !removal_callback_.is_null()) - removal_callback_.Run(); - } + // Holds non-null callbacks, which will be called during Notify(). + Callbacks callbacks_; private: - void OnSubscriptionDestroyed( - const typename std::list<CallbackType>::iterator& iter) { - if (active_iterator_count_) { - iter->Reset(); + // Cancels the callback pointed to by |it|, which is guaranteed to be valid. + void CancelCallback(const typename Callbacks::iterator& it) { + if (static_cast<CallbackListImpl*>(this)->CancelNullCallback(it)) + return; + + if (iterating_) { + // Calling erase() here is unsafe, since the loop in Notify() may be + // referencing this same iterator, e.g. if adjacent callbacks' + // Subscriptions are both destroyed when the first one is Run(). Just + // reset the callback and let Notify() clean it up at the end. + it->Reset(); } else { - callbacks_.erase(iter); + callbacks_.erase(it); if (removal_callback_) - removal_callback_.Run(); + removal_callback_.Run(); // May delete |this|! } - // Note that |removal_callback_| may destroy |this|. } - std::list<CallbackType> callbacks_; - size_t active_iterator_count_ = 0; + // Set while Notify() is traversing |callbacks_|. Used primarily to avoid + // invalidating iterators that may be in use. + bool iterating_ = false; + + // Called after elements are removed from |callbacks_|. RepeatingClosure removal_callback_; - WeakPtrFactory<CallbackListBase> weak_ptr_factory_{this}; - DISALLOW_COPY_AND_ASSIGN(CallbackListBase); + WeakPtrFactory<CallbackListBase> weak_ptr_factory_{this}; }; } // namespace internal -template <typename Sig> class CallbackList; +template <typename Signature> +class OnceCallbackList + : public internal::CallbackListBase<OnceCallbackList<Signature>> { + private: + friend internal::CallbackListBase<OnceCallbackList>; + using Traits = internal::CallbackListTraits<OnceCallbackList>; -template <typename... Args> -class CallbackList<void(Args...)> - : public internal::CallbackListBase<RepeatingCallback<void(Args...)>> { - public: - using CallbackType = RepeatingCallback<void(Args...)>; + // Runs the current callback, which may cancel it or any other callbacks. + template <typename... RunArgs> + void RunCallback(typename Traits::Callbacks::iterator it, RunArgs&&... args) { + // OnceCallbacks still have Subscriptions with outstanding iterators; + // splice() removes them from |callbacks_| without invalidating those. 
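+    // (std::list::splice() moves the list node itself, so |it| remains a
+    // valid iterator, now referring into |null_callbacks_|, and is
+    // dereferenced immediately below.)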
+ null_callbacks_.splice(null_callbacks_.end(), this->callbacks_, it); - CallbackList() = default; + std::move(*it).Run(args...); + } - template <typename... RunArgs> - void Notify(RunArgs&&... args) { - auto it = this->GetIterator(); - CallbackType* cb; - while ((cb = it.GetNext()) != nullptr) { - cb->Run(args...); + // If |it| refers to an already-canceled callback, does any necessary cleanup + // and returns true. Otherwise returns false. + bool CancelNullCallback(const typename Traits::Callbacks::iterator& it) { + if (it->is_null()) { + null_callbacks_.erase(it); + return true; } + return false; } + // Holds null callbacks whose Subscriptions are still alive, so the + // Subscriptions will still contain valid iterators. Only needed for + // OnceCallbacks, since RepeatingCallbacks are not canceled except by + // Subscription destruction. + typename Traits::Callbacks null_callbacks_; +}; + +template <typename Signature> +class RepeatingCallbackList + : public internal::CallbackListBase<RepeatingCallbackList<Signature>> { private: - DISALLOW_COPY_AND_ASSIGN(CallbackList); + friend internal::CallbackListBase<RepeatingCallbackList>; + using Traits = internal::CallbackListTraits<RepeatingCallbackList>; + // Runs the current callback, which may cancel it or any other callbacks. + template <typename... RunArgs> + void RunCallback(typename Traits::Callbacks::iterator it, RunArgs&&... args) { + it->Run(args...); + } + + // If |it| refers to an already-canceled callback, does any necessary cleanup + // and returns true. Otherwise returns false. + bool CancelNullCallback(const typename Traits::Callbacks::iterator& it) { + // Because at most one Subscription can point to a given callback, and + // RepeatingCallbacks are only reset by CancelCallback(), no one should be + // able to request cancellation of a canceled RepeatingCallback. + DCHECK(!it->is_null()); + return false; + } }; +template <typename Signature> +using CallbackList = RepeatingCallbackList<Signature>; + +// Syntactic sugar to parallel that used for Callbacks. 
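+//
+// For example (illustrative only, using the aliases declared just below):
+//
+//   base::OnceClosureList on_loaded;
+//   auto subscription = on_loaded.Add(base::BindOnce([] {}));
+//   on_loaded.Notify();  // Runs the closure, then drops it from the list.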
+using OnceClosureList = OnceCallbackList<void()>; +using RepeatingClosureList = RepeatingCallbackList<void()>; +using ClosureList = CallbackList<void()>; + } // namespace base #endif // BASE_CALLBACK_LIST_H_ diff --git a/chromium/base/callback_list_unittest.cc b/chromium/base/callback_list_unittest.cc index e7a97afba49..2f15a8aac36 100644 --- a/chromium/base/callback_list_unittest.cc +++ b/chromium/base/callback_list_unittest.cc @@ -9,7 +9,6 @@ #include "base/bind.h" #include "base/bind_helpers.h" -#include "base/macros.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { @@ -17,46 +16,54 @@ namespace { class Listener { public: - Listener() : total_(0), scaler_(1) {} - explicit Listener(int scaler) : total_(0), scaler_(scaler) {} - void IncrementTotal() { total_++; } + Listener() = default; + explicit Listener(int scaler) : scaler_(scaler) {} + Listener(const Listener&) = delete; + Listener& operator=(const Listener&) = delete; + ~Listener() = default; + + void IncrementTotal() { ++total_; } + void IncrementByMultipleOfScaler(int x) { total_ += x * scaler_; } int total() const { return total_; } private: - int total_; - int scaler_; - DISALLOW_COPY_AND_ASSIGN(Listener); + int total_ = 0; + int scaler_ = 1; }; +template <typename T> class Remover { public: - Remover() : total_(0) {} + Remover() = default; + Remover(const Remover&) = delete; + Remover& operator=(const Remover&) = delete; + ~Remover() = default; + void IncrementTotalAndRemove() { - total_++; + ++total_; removal_subscription_.reset(); } - void SetSubscriptionToRemove( - std::unique_ptr<CallbackList<void(void)>::Subscription> sub) { + + void SetSubscriptionToRemove(std::unique_ptr<typename T::Subscription> sub) { removal_subscription_ = std::move(sub); } int total() const { return total_; } private: - int total_; - std::unique_ptr<CallbackList<void(void)>::Subscription> removal_subscription_; - DISALLOW_COPY_AND_ASSIGN(Remover); + int total_ = 0; + std::unique_ptr<typename T::Subscription> removal_subscription_; }; class Adder { public: - explicit Adder(CallbackList<void(void)>* cb_reg) - : added_(false), - total_(0), - cb_reg_(cb_reg) { - } + explicit Adder(RepeatingClosureList* cb_reg) : cb_reg_(cb_reg) {} + Adder(const Adder&) = delete; + Adder& operator=(const Adder&) = delete; + ~Adder() = default; + void AddCallback() { if (!added_) { added_ = true; @@ -64,23 +71,25 @@ class Adder { cb_reg_->Add(BindRepeating(&Adder::IncrementTotal, Unretained(this))); } } - void IncrementTotal() { total_++; } - bool added() const { return added_; } + void IncrementTotal() { ++total_; } + bool added() const { return added_; } int total() const { return total_; } private: - bool added_; - int total_; - CallbackList<void(void)>* cb_reg_; - std::unique_ptr<CallbackList<void(void)>::Subscription> subscription_; - DISALLOW_COPY_AND_ASSIGN(Adder); + bool added_ = false; + int total_ = 0; + RepeatingClosureList* cb_reg_; + std::unique_ptr<RepeatingClosureList::Subscription> subscription_; }; class Summer { public: - Summer() : value_(0) {} + Summer() = default; + Summer(const Summer&) = delete; + Summer& operator=(const Summer&) = delete; + ~Summer() = default; void AddOneParam(int a) { value_ = a; } void AddTwoParam(int a, int b) { value_ = a + b; } @@ -96,68 +105,72 @@ class Summer { int value() const { return value_; } private: - int value_; - DISALLOW_COPY_AND_ASSIGN(Summer); + int value_ = 0; }; class Counter { public: - Counter() : value_(0) {} + Counter() = default; + Counter(const Counter&) = delete; + Counter& 
operator=(const Counter&) = delete; + ~Counter() = default; - void Increment() { value_++; } + void Increment() { ++value_; } int value() const { return value_; } private: - int value_; - DISALLOW_COPY_AND_ASSIGN(Counter); + int value_ = 0; }; // Sanity check that we can instantiate a CallbackList for each arity. TEST(CallbackListTest, ArityTest) { Summer s; - CallbackList<void(int)> c1; - std::unique_ptr<CallbackList<void(int)>::Subscription> subscription1 = - c1.Add(BindRepeating(&Summer::AddOneParam, Unretained(&s))); + RepeatingCallbackList<void(int)> c1; + std::unique_ptr<RepeatingCallbackList<void(int)>::Subscription> + subscription1 = + c1.Add(BindRepeating(&Summer::AddOneParam, Unretained(&s))); c1.Notify(1); EXPECT_EQ(1, s.value()); - CallbackList<void(int, int)> c2; - std::unique_ptr<CallbackList<void(int, int)>::Subscription> subscription2 = - c2.Add(BindRepeating(&Summer::AddTwoParam, Unretained(&s))); + RepeatingCallbackList<void(int, int)> c2; + std::unique_ptr<RepeatingCallbackList<void(int, int)>::Subscription> + subscription2 = + c2.Add(BindRepeating(&Summer::AddTwoParam, Unretained(&s))); c2.Notify(1, 2); EXPECT_EQ(3, s.value()); - CallbackList<void(int, int, int)> c3; - std::unique_ptr<CallbackList<void(int, int, int)>::Subscription> + RepeatingCallbackList<void(int, int, int)> c3; + std::unique_ptr<RepeatingCallbackList<void(int, int, int)>::Subscription> subscription3 = c3.Add(BindRepeating(&Summer::AddThreeParam, Unretained(&s))); c3.Notify(1, 2, 3); EXPECT_EQ(6, s.value()); - CallbackList<void(int, int, int, int)> c4; - std::unique_ptr<CallbackList<void(int, int, int, int)>::Subscription> + RepeatingCallbackList<void(int, int, int, int)> c4; + std::unique_ptr<RepeatingCallbackList<void(int, int, int, int)>::Subscription> subscription4 = c4.Add(BindRepeating(&Summer::AddFourParam, Unretained(&s))); c4.Notify(1, 2, 3, 4); EXPECT_EQ(10, s.value()); - CallbackList<void(int, int, int, int, int)> c5; - std::unique_ptr<CallbackList<void(int, int, int, int, int)>::Subscription> + RepeatingCallbackList<void(int, int, int, int, int)> c5; + std::unique_ptr< + RepeatingCallbackList<void(int, int, int, int, int)>::Subscription> subscription5 = c5.Add(BindRepeating(&Summer::AddFiveParam, Unretained(&s))); c5.Notify(1, 2, 3, 4, 5); EXPECT_EQ(15, s.value()); - CallbackList<void(int, int, int, int, int, int)> c6; + RepeatingCallbackList<void(int, int, int, int, int, int)> c6; std::unique_ptr< - CallbackList<void(int, int, int, int, int, int)>::Subscription> + RepeatingCallbackList<void(int, int, int, int, int, int)>::Subscription> subscription6 = c6.Add(BindRepeating(&Summer::AddSixParam, Unretained(&s))); @@ -168,12 +181,12 @@ TEST(CallbackListTest, ArityTest) { // Sanity check that closures added to the list will be run, and those removed // from the list will not be run. 
TEST(CallbackListTest, BasicTest) { - CallbackList<void(void)> cb_reg; + RepeatingClosureList cb_reg; Listener a, b, c; - std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription = + std::unique_ptr<RepeatingClosureList::Subscription> a_subscription = cb_reg.Add(BindRepeating(&Listener::IncrementTotal, Unretained(&a))); - std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription = + std::unique_ptr<RepeatingClosureList::Subscription> b_subscription = cb_reg.Add(BindRepeating(&Listener::IncrementTotal, Unretained(&b))); EXPECT_TRUE(a_subscription.get()); @@ -186,7 +199,7 @@ TEST(CallbackListTest, BasicTest) { b_subscription.reset(); - std::unique_ptr<CallbackList<void(void)>::Subscription> c_subscription = + std::unique_ptr<RepeatingClosureList::Subscription> c_subscription = cb_reg.Add(BindRepeating(&Listener::IncrementTotal, Unretained(&c))); cb_reg.Notify(); @@ -194,24 +207,54 @@ TEST(CallbackListTest, BasicTest) { EXPECT_EQ(2, a.total()); EXPECT_EQ(1, b.total()); EXPECT_EQ(1, c.total()); +} - a_subscription.reset(); +// Similar to BasicTest but with OnceCallbacks instead of Repeating. +TEST(CallbackListTest, OnceCallbacks) { + OnceClosureList cb_reg; + Listener a, b, c; + + std::unique_ptr<OnceClosureList::Subscription> a_subscription = + cb_reg.Add(BindOnce(&Listener::IncrementTotal, Unretained(&a))); + std::unique_ptr<OnceClosureList::Subscription> b_subscription = + cb_reg.Add(BindOnce(&Listener::IncrementTotal, Unretained(&b))); + + EXPECT_TRUE(a_subscription.get()); + EXPECT_TRUE(b_subscription.get()); + + cb_reg.Notify(); + + EXPECT_EQ(1, a.total()); + EXPECT_EQ(1, b.total()); + + // OnceCallbacks should auto-remove themselves after calling Notify(). + EXPECT_TRUE(cb_reg.empty()); + + // Destroying a subscription after the callback is canceled should not cause + // any problems. b_subscription.reset(); - c_subscription.reset(); + + std::unique_ptr<OnceClosureList::Subscription> c_subscription = + cb_reg.Add(BindOnce(&Listener::IncrementTotal, Unretained(&c))); + + cb_reg.Notify(); + + EXPECT_EQ(1, a.total()); + EXPECT_EQ(1, b.total()); + EXPECT_EQ(1, c.total()); } // Sanity check that callbacks with details added to the list will be run, with // the correct details, and those removed from the list will not be run. 
TEST(CallbackListTest, BasicTestWithParams) { - CallbackList<void(int)> cb_reg; + using CallbackListType = RepeatingCallbackList<void(int)>; + CallbackListType cb_reg; Listener a(1), b(-1), c(1); - std::unique_ptr<CallbackList<void(int)>::Subscription> a_subscription = - cb_reg.Add(BindRepeating(&Listener::IncrementByMultipleOfScaler, - Unretained(&a))); - std::unique_ptr<CallbackList<void(int)>::Subscription> b_subscription = - cb_reg.Add(BindRepeating(&Listener::IncrementByMultipleOfScaler, - Unretained(&b))); + std::unique_ptr<CallbackListType::Subscription> a_subscription = cb_reg.Add( + BindRepeating(&Listener::IncrementByMultipleOfScaler, Unretained(&a))); + std::unique_ptr<CallbackListType::Subscription> b_subscription = cb_reg.Add( + BindRepeating(&Listener::IncrementByMultipleOfScaler, Unretained(&b))); EXPECT_TRUE(a_subscription.get()); EXPECT_TRUE(b_subscription.get()); @@ -223,37 +266,34 @@ TEST(CallbackListTest, BasicTestWithParams) { b_subscription.reset(); - std::unique_ptr<CallbackList<void(int)>::Subscription> c_subscription = - cb_reg.Add(BindRepeating(&Listener::IncrementByMultipleOfScaler, - Unretained(&c))); + std::unique_ptr<CallbackListType::Subscription> c_subscription = cb_reg.Add( + BindRepeating(&Listener::IncrementByMultipleOfScaler, Unretained(&c))); cb_reg.Notify(10); EXPECT_EQ(20, a.total()); EXPECT_EQ(-10, b.total()); EXPECT_EQ(10, c.total()); - - a_subscription.reset(); - b_subscription.reset(); - c_subscription.reset(); } // Test the a callback can remove itself or a different callback from the list // during iteration without invalidating the iterator. TEST(CallbackListTest, RemoveCallbacksDuringIteration) { - CallbackList<void(void)> cb_reg; + RepeatingClosureList cb_reg; Listener a, b; - Remover remover_1, remover_2; - - std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub = - cb_reg.Add(BindRepeating(&Remover::IncrementTotalAndRemove, - Unretained(&remover_1))); - std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub = - cb_reg.Add(BindRepeating(&Remover::IncrementTotalAndRemove, - Unretained(&remover_2))); - std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription = + Remover<RepeatingClosureList> remover_1, remover_2; + + std::unique_ptr<RepeatingClosureList::Subscription> remover_1_sub = + cb_reg.Add( + BindRepeating(&Remover<RepeatingClosureList>::IncrementTotalAndRemove, + Unretained(&remover_1))); + std::unique_ptr<RepeatingClosureList::Subscription> remover_2_sub = + cb_reg.Add( + BindRepeating(&Remover<RepeatingClosureList>::IncrementTotalAndRemove, + Unretained(&remover_2))); + std::unique_ptr<RepeatingClosureList::Subscription> a_subscription = cb_reg.Add(BindRepeating(&Listener::IncrementTotal, Unretained(&a))); - std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription = + std::unique_ptr<RepeatingClosureList::Subscription> b_subscription = cb_reg.Add(BindRepeating(&Listener::IncrementTotal, Unretained(&b))); // |remover_1| will remove itself. @@ -279,16 +319,57 @@ TEST(CallbackListTest, RemoveCallbacksDuringIteration) { EXPECT_EQ(2, b.total()); } +// Similar to RemoveCallbacksDuringIteration but with OnceCallbacks instead of +// Repeating. 
+TEST(CallbackListTest, RemoveOnceCallbacksDuringIteration) { + OnceClosureList cb_reg; + Listener a, b; + Remover<OnceClosureList> remover_1, remover_2; + + std::unique_ptr<OnceClosureList::Subscription> remover_1_sub = + cb_reg.Add(BindOnce(&Remover<OnceClosureList>::IncrementTotalAndRemove, + Unretained(&remover_1))); + std::unique_ptr<OnceClosureList::Subscription> remover_2_sub = + cb_reg.Add(BindOnce(&Remover<OnceClosureList>::IncrementTotalAndRemove, + Unretained(&remover_2))); + std::unique_ptr<OnceClosureList::Subscription> a_subscription = + cb_reg.Add(BindOnce(&Listener::IncrementTotal, Unretained(&a))); + std::unique_ptr<OnceClosureList::Subscription> b_subscription = + cb_reg.Add(BindOnce(&Listener::IncrementTotal, Unretained(&b))); + + // |remover_1| will remove itself. + remover_1.SetSubscriptionToRemove(std::move(remover_1_sub)); + // |remover_2| will remove a. + remover_2.SetSubscriptionToRemove(std::move(a_subscription)); + + cb_reg.Notify(); + + // |remover_1| runs once (and removes itself), |remover_2| runs once (and + // removes a), |a| never runs, and |b| runs once. + EXPECT_EQ(1, remover_1.total()); + EXPECT_EQ(1, remover_2.total()); + EXPECT_EQ(0, a.total()); + EXPECT_EQ(1, b.total()); + + cb_reg.Notify(); + + // Nothing runs this time. + EXPECT_EQ(1, remover_1.total()); + EXPECT_EQ(1, remover_2.total()); + EXPECT_EQ(0, a.total()); + EXPECT_EQ(1, b.total()); +} + // Test that a callback can add another callback to the list durning iteration // without invalidating the iterator. The newly added callback should be run on // the current iteration as will all other callbacks in the list. TEST(CallbackListTest, AddCallbacksDuringIteration) { - CallbackList<void(void)> cb_reg; + RepeatingClosureList cb_reg; Adder a(&cb_reg); Listener b; - std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription = + std::unique_ptr<RepeatingClosureList::Subscription> a_subscription = cb_reg.Add(BindRepeating(&Adder::AddCallback, Unretained(&a))); - std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription = + std::unique_ptr<RepeatingClosureList::Subscription> b_subscription = cb_reg.Add(BindRepeating(&Listener::IncrementTotal, Unretained(&b))); cb_reg.Notify(); @@ -305,18 +386,18 @@ TEST(CallbackListTest, AddCallbacksDuringIteration) { // Sanity check: notifying an empty list is a no-op. TEST(CallbackListTest, EmptyList) { - CallbackList<void(void)> cb_reg; + RepeatingClosureList cb_reg; cb_reg.Notify(); } -TEST(CallbackList, RemovalCallback) { +TEST(CallbackListTest, RemovalCallback) { Counter remove_count; - CallbackList<void(void)> cb_reg; + RepeatingClosureList cb_reg; cb_reg.set_removal_callback( BindRepeating(&Counter::Increment, Unretained(&remove_count))); - std::unique_ptr<CallbackList<void(void)>::Subscription> subscription = + std::unique_ptr<RepeatingClosureList::Subscription> subscription = cb_reg.Add(DoNothing()); // Removing a subscription outside of iteration signals the callback. @@ -325,13 +406,15 @@ TEST(CallbackList, RemovalCallback) { EXPECT_EQ(1, remove_count.value()); // Configure two subscriptions to remove themselves. 
- Remover remover_1, remover_2; - std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub = - cb_reg.Add(BindRepeating(&Remover::IncrementTotalAndRemove, - Unretained(&remover_1))); - std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub = - cb_reg.Add(BindRepeating(&Remover::IncrementTotalAndRemove, - Unretained(&remover_2))); + Remover<RepeatingClosureList> remover_1, remover_2; + std::unique_ptr<RepeatingClosureList::Subscription> remover_1_sub = + cb_reg.Add( + BindRepeating(&Remover<RepeatingClosureList>::IncrementTotalAndRemove, + Unretained(&remover_1))); + std::unique_ptr<RepeatingClosureList::Subscription> remover_2_sub = + cb_reg.Add( + BindRepeating(&Remover<RepeatingClosureList>::IncrementTotalAndRemove, + Unretained(&remover_2))); remover_1.SetSubscriptionToRemove(std::move(remover_1_sub)); remover_2.SetSubscriptionToRemove(std::move(remover_2_sub)); @@ -342,11 +425,11 @@ TEST(CallbackList, RemovalCallback) { EXPECT_TRUE(cb_reg.empty()); } -TEST(CallbackList, AbandonSubscriptions) { +TEST(CallbackListTest, AbandonSubscriptions) { Listener listener; - std::unique_ptr<CallbackList<void(void)>::Subscription> subscription; + std::unique_ptr<RepeatingClosureList::Subscription> subscription; { - CallbackList<void(void)> cb_reg; + RepeatingClosureList cb_reg; subscription = cb_reg.Add( BindRepeating(&Listener::IncrementTotal, Unretained(&listener))); // Make sure the callback is signaled while cb_reg is in scope. @@ -354,8 +437,26 @@ TEST(CallbackList, AbandonSubscriptions) { // Exiting this scope and running the cb_reg destructor shouldn't fail. } EXPECT_EQ(1, listener.total()); - // The subscription from the destroyed callback list should be cancelled now. - EXPECT_TRUE(subscription->IsCancelled()); + + // Destroying the subscription after the list should not cause any problems. + subscription.reset(); +} + +TEST(CallbackListTest, CancelBeforeRunning) { + OnceClosureList cb_reg; + Listener a; + + std::unique_ptr<OnceClosureList::Subscription> a_subscription = + cb_reg.Add(BindOnce(&Listener::IncrementTotal, Unretained(&a))); + + EXPECT_TRUE(a_subscription.get()); + + // Canceling a OnceCallback before running it should not cause problems. + a_subscription.reset(); + cb_reg.Notify(); + + // |a| should not have received any callbacks. + EXPECT_EQ(0, a.total()); } } // namespace diff --git a/chromium/base/callback_unittest.cc b/chromium/base/callback_unittest.cc index 10c1c183458..d02defc6ba6 100644 --- a/chromium/base/callback_unittest.cc +++ b/chromium/base/callback_unittest.cc @@ -10,6 +10,7 @@ #include "base/bind.h" #include "base/callback_internal.h" #include "base/memory/ref_counted.h" +#include "base/notreached.h" #include "base/test/test_timeouts.h" #include "base/threading/thread.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/check.cc b/chromium/base/check.cc new file mode 100644 index 00000000000..5035cd52b3c --- /dev/null +++ b/chromium/base/check.cc @@ -0,0 +1,112 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/check.h" + +// check.h is a widely included header and its size has significant impact on +// build time. Try not to raise this limit unless absolutely necessary. 
See +// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md +#ifndef NACL_TC_REV +#pragma clang max_tokens_here 17000 +#endif + +#include "base/logging.h" +#include "build/build_config.h" + +namespace logging { + +CheckError CheckError::Check(const char* file, + int line, + const char* condition) { + CheckError check_error(new LogMessage(file, line, LOG_FATAL)); + check_error.stream() << "Check failed: " << condition << ". "; + return check_error; +} + +CheckError CheckError::CheckOp(const char* file, + int line, + CheckOpResult* check_op_result) { + CheckError check_error(new LogMessage(file, line, LOG_FATAL)); + check_error.stream() << "Check failed: " << check_op_result->message_; + free(check_op_result->message_); + check_op_result->message_ = nullptr; + return check_error; +} + +CheckError CheckError::DCheck(const char* file, + int line, + const char* condition) { + CheckError check_error(new LogMessage(file, line, LOG_DCHECK)); + check_error.stream() << "Check failed: " << condition << ". "; + return check_error; +} + +CheckError CheckError::DCheckOp(const char* file, + int line, + CheckOpResult* check_op_result) { + CheckError check_error(new LogMessage(file, line, LOG_DCHECK)); + check_error.stream() << "Check failed: " << check_op_result->message_; + free(check_op_result->message_); + check_op_result->message_ = nullptr; + return check_error; +} + +CheckError CheckError::PCheck(const char* file, + int line, + const char* condition) { + SystemErrorCode err_code = logging::GetLastSystemErrorCode(); +#if defined(OS_WIN) + CheckError check_error( + new Win32ErrorLogMessage(file, line, LOG_FATAL, err_code)); +#elif defined(OS_POSIX) || defined(OS_FUCHSIA) + CheckError check_error(new ErrnoLogMessage(file, line, LOG_FATAL, err_code)); +#endif + check_error.stream() << "Check failed: " << condition << ". "; + return check_error; +} + +CheckError CheckError::PCheck(const char* file, int line) { + return PCheck(file, line, ""); +} + +CheckError CheckError::DPCheck(const char* file, + int line, + const char* condition) { + SystemErrorCode err_code = logging::GetLastSystemErrorCode(); +#if defined(OS_WIN) + CheckError check_error( + new Win32ErrorLogMessage(file, line, LOG_DCHECK, err_code)); +#elif defined(OS_POSIX) || defined(OS_FUCHSIA) + CheckError check_error(new ErrnoLogMessage(file, line, LOG_DCHECK, err_code)); +#endif + check_error.stream() << "Check failed: " << condition << ". "; + return check_error; +} + +CheckError CheckError::NotImplemented(const char* file, + int line, + const char* function) { + CheckError check_error(new LogMessage(file, line, LOG_ERROR)); + check_error.stream() << "Not implemented reached in " << function; + return check_error; +} + +std::ostream& CheckError::stream() { + return log_message_->stream(); +} + +CheckError::~CheckError() { + // Note: This function ends up in crash stack traces. If its full name + // changes, the crash server's magic signature logic needs to be updated. + // See cl/306632920. + delete log_message_; +} + +CheckError::CheckError(LogMessage* log_message) : log_message_(log_message) {} + +void RawCheck(const char* message) { + RawLog(LOG_FATAL, message); +} + +} // namespace logging diff --git a/chromium/base/check.h b/chromium/base/check.h new file mode 100644 index 00000000000..086846d33a8 --- /dev/null +++ b/chromium/base/check.h @@ -0,0 +1,164 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_CHECK_H_ +#define BASE_CHECK_H_ + +#include <iosfwd> + +#include "base/base_export.h" +#include "base/compiler_specific.h" +#include "base/immediate_crash.h" + +// This header defines the CHECK, DCHECK, and DPCHECK macros. +// +// CHECK dies with a fatal error if its condition is not true. It is not +// controlled by NDEBUG, so the check will be executed regardless of compilation +// mode. +// +// DCHECK, the "debug mode" check, is enabled depending on NDEBUG and +// DCHECK_ALWAYS_ON, and its severity depends on DCHECK_IS_CONFIGURABLE. +// +// (D)PCHECK is like (D)CHECK, but includes the system error code (c.f. +// perror(3)). +// +// Additional information can be streamed to these macros and will be included +// in the log output if the condition doesn't hold (you may need to include +// <ostream>): +// +// CHECK(condition) << "Additional info."; +// +// The condition is evaluated exactly once. Even in build modes where e.g. +// DCHECK is disabled, the condition and any stream arguments are still +// referenced to avoid warnings about unused variables and functions. +// +// For the (D)CHECK_EQ, etc. macros, see base/check_op.h. However, that header +// is *significantly* larger than check.h, so try to avoid including it in +// header files. + +namespace logging { + +// Class used to explicitly ignore an ostream, and optionally a boolean value. +class VoidifyStream { + public: + VoidifyStream() = default; + explicit VoidifyStream(bool ignored) {} + + // This operator has lower precedence than << but higher than ?: + void operator&(std::ostream&) {} +}; + +// Helper macro which avoids evaluating the arguents to a stream if the +// condition is false. +#define LAZY_CHECK_STREAM(stream, condition) \ + !(condition) ? (void)0 : ::logging::VoidifyStream() & (stream) + +// Macro which uses but does not evaluate expr and any stream parameters. +#define EAT_CHECK_STREAM_PARAMS(expr) \ + true ? (void)0 \ + : ::logging::VoidifyStream(expr) & (*::logging::g_swallow_stream) +BASE_EXPORT extern std::ostream* g_swallow_stream; + +class CheckOpResult; +class LogMessage; + +// Class used for raising a check error upon destruction. +class BASE_EXPORT CheckError { + public: + static CheckError Check(const char* file, int line, const char* condition); + static CheckError CheckOp(const char* file, int line, CheckOpResult* result); + + static CheckError DCheck(const char* file, int line, const char* condition); + static CheckError DCheckOp(const char* file, int line, CheckOpResult* result); + + static CheckError PCheck(const char* file, int line, const char* condition); + static CheckError PCheck(const char* file, int line); + + static CheckError DPCheck(const char* file, int line, const char* condition); + + static CheckError NotImplemented(const char* file, + int line, + const char* function); + + // Stream for adding optional details to the error message. + std::ostream& stream(); + + ~CheckError(); + + CheckError(const CheckError& other) = delete; + CheckError& operator=(const CheckError& other) = delete; + CheckError(CheckError&& other) = default; + CheckError& operator=(CheckError&& other) = default; + + private: + explicit CheckError(LogMessage* log_message); + + LogMessage* log_message_; +}; + +#if defined(OFFICIAL_BUILD) && defined(NDEBUG) + +// Discard log strings to reduce code bloat. 
+// +// This is not calling BreakDebugger since this is called frequently, and +// calling an out-of-line function instead of a noreturn inline macro prevents +// compiler optimizations. +#define CHECK(condition) \ + UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS() + +#define PCHECK(condition) \ + LAZY_CHECK_STREAM( \ + ::logging::CheckError::PCheck(__FILE__, __LINE__).stream(), \ + UNLIKELY(!(condition))) + +#else + +#define CHECK(condition) \ + LAZY_CHECK_STREAM( \ + ::logging::CheckError::Check(__FILE__, __LINE__, #condition).stream(), \ + !ANALYZER_ASSUME_TRUE(condition)) + +#define PCHECK(condition) \ + LAZY_CHECK_STREAM( \ + ::logging::CheckError::PCheck(__FILE__, __LINE__, #condition).stream(), \ + !ANALYZER_ASSUME_TRUE(condition)) + +#endif + +#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) +#define DCHECK_IS_ON() false +#else +#define DCHECK_IS_ON() true +#endif + +#if DCHECK_IS_ON() + +#define DCHECK(condition) \ + LAZY_CHECK_STREAM( \ + ::logging::CheckError::DCheck(__FILE__, __LINE__, #condition).stream(), \ + !ANALYZER_ASSUME_TRUE(condition)) + +#define DPCHECK(condition) \ + LAZY_CHECK_STREAM( \ + ::logging::CheckError::DPCheck(__FILE__, __LINE__, #condition).stream(), \ + !ANALYZER_ASSUME_TRUE(condition)) + +#else + +#define DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition)) +#define DPCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition)) + +#endif + +// Async signal safe checking mechanism. +BASE_EXPORT void RawCheck(const char* message); +#define RAW_CHECK(condition) \ + do { \ + if (!(condition)) \ + ::logging::RawCheck("Check failed: " #condition "\n"); \ + } while (0) + +} // namespace logging + +#endif // BASE_CHECK_H_ diff --git a/chromium/base/check_example.cc b/chromium/base/check_example.cc index 7b9d8e6a80e..1f905a4baff 100644 --- a/chromium/base/check_example.cc +++ b/chromium/base/check_example.cc @@ -5,8 +5,10 @@ // This file is meant for analyzing the code generated by the CHECK // macros in a small executable file that's easy to disassemble. +#include <ostream> + +#include "base/check_op.h" #include "base/compiler_specific.h" -#include "base/logging.h" // An official build shouldn't generate code to print out messages for // the CHECK* macros, nor should it have the strings in the diff --git a/chromium/base/check_op.cc b/chromium/base/check_op.cc new file mode 100644 index 00000000000..4f887fc5787 --- /dev/null +++ b/chromium/base/check_op.cc @@ -0,0 +1,88 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/check_op.h" + +// check_op.h is a widely included header and its size has significant impact on +// build time. Try not to raise this limit unless absolutely necessary. 
See +// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md +#ifndef NACL_TC_REV +#pragma clang max_tokens_here 244000 +#endif + +#include <string.h> + +#include <cstdio> +#include <sstream> + +namespace logging { + +char* CheckOpValueStr(int v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%d", v); + return strdup(buf); +} + +char* CheckOpValueStr(unsigned v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%u", v); + return strdup(buf); +} + +char* CheckOpValueStr(long v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%ld", v); + return strdup(buf); +} + +char* CheckOpValueStr(unsigned long v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%lu", v); + return strdup(buf); +} + +char* CheckOpValueStr(long long v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%lld", v); + return strdup(buf); +} + +char* CheckOpValueStr(unsigned long long v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%llu", v); + return strdup(buf); +} + +char* CheckOpValueStr(const void* v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%p", v); + return strdup(buf); +} + +char* CheckOpValueStr(std::nullptr_t v) { + return strdup("nullptr"); +} + +char* CheckOpValueStr(double v) { + char buf[50]; + snprintf(buf, sizeof(buf), "%.6lf", v); + return strdup(buf); +} + +char* StreamValToStr(const void* v, + void (*stream_func)(std::ostream&, const void*)) { + std::stringstream ss; + stream_func(ss, v); + return strdup(ss.str().c_str()); +} + +CheckOpResult::CheckOpResult(const char* expr_str, char* v1_str, char* v2_str) { + std::ostringstream ss; + ss << expr_str << " (" << v1_str << " vs. " << v2_str << ")"; + message_ = strdup(ss.str().c_str()); + free(v1_str); + free(v2_str); +} + +} // namespace logging diff --git a/chromium/base/check_op.h b/chromium/base/check_op.h new file mode 100644 index 00000000000..4816df4e8b0 --- /dev/null +++ b/chromium/base/check_op.h @@ -0,0 +1,228 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_CHECK_OP_H_ +#define BASE_CHECK_OP_H_ + +#include <cstddef> +#include <type_traits> + +#include "base/check.h" +#include "base/template_util.h" + +// This header defines the (DP)CHECK_EQ etc. macros. +// +// (DP)CHECK_EQ(x, y) is similar to (DP)CHECK(x == y) but will also log the +// values of x and y if the condition doesn't hold. This works for basic types +// and types with an operator<< or .ToString() method. +// +// The operands are evaluated exactly once, and even in build modes where e.g. +// DCHECK is disabled, the operands and their stringification methods are still +// referenced to avoid warnings about unused variables or functions. +// +// To support the stringification of the check operands, this header is +// *significantly* larger than base/check.h, so it should be avoided in common +// headers. +// +// This header also provides the (DP)CHECK macros (by including check.h), so if +// you use e.g. both CHECK_EQ and CHECK, including this header is enough. If you +// only use CHECK however, please include the smaller check.h instead. + +namespace logging { + +// Functions for turning check operand values into strings. +// Caller takes ownership of the returned string. 
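+//
+// For example (illustrative only):
+//
+//   char* str = logging::CheckOpValueStr(42);  // Internally strdup()'d.
+//   // ... use |str| ...
+//   free(str);  // Ownership is the caller's, so it must be free()'d.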
+BASE_EXPORT char* CheckOpValueStr(int v); +BASE_EXPORT char* CheckOpValueStr(unsigned v); +BASE_EXPORT char* CheckOpValueStr(long v); +BASE_EXPORT char* CheckOpValueStr(unsigned long v); +BASE_EXPORT char* CheckOpValueStr(long long v); +BASE_EXPORT char* CheckOpValueStr(unsigned long long v); +BASE_EXPORT char* CheckOpValueStr(const void* v); +BASE_EXPORT char* CheckOpValueStr(std::nullptr_t v); +BASE_EXPORT char* CheckOpValueStr(double v); + +// Convert a streamable value to string out-of-line to avoid <sstream>. +BASE_EXPORT char* StreamValToStr(const void* v, + void (*stream_func)(std::ostream&, + const void*)); + +#ifndef __has_builtin +#define __has_builtin(x) 0 // Compatibility with non-clang compilers. +#endif + +template <typename T> +inline typename std::enable_if< + base::internal::SupportsOstreamOperator<const T&>::value && + !std::is_function<typename std::remove_pointer<T>::type>::value, + char*>::type +CheckOpValueStr(const T& v) { + auto f = [](std::ostream& s, const void* p) { + s << *reinterpret_cast<const T*>(p); + }; + + // operator& might be overloaded, so do the std::addressof dance. + // __builtin_addressof is preferred since it also handles Obj-C ARC pointers. + // Some casting is still needed, because T might be volatile. +#if __has_builtin(__builtin_addressof) + const void* vp = const_cast<const void*>( + reinterpret_cast<const volatile void*>(__builtin_addressof(v))); +#else + const void* vp = reinterpret_cast<const void*>( + const_cast<const char*>(&reinterpret_cast<const volatile char&>(v))); +#endif + return StreamValToStr(vp, f); +} + +// Overload for types that have no operator<< but do have .ToString() defined. +template <typename T> +inline typename std::enable_if< + !base::internal::SupportsOstreamOperator<const T&>::value && + base::internal::SupportsToString<const T&>::value, + char*>::type +CheckOpValueStr(const T& v) { + // .ToString() may not return a std::string, e.g. blink::WTF::String. + return CheckOpValueStr(v.ToString()); +} + +// Provide an overload for functions and function pointers. Function pointers +// don't implicitly convert to void* but do implicitly convert to bool, so +// without this function pointers are always printed as 1 or 0. (MSVC isn't +// standards-conforming here and converts function pointers to regular +// pointers, so this is a no-op for MSVC.) +template <typename T> +inline typename std::enable_if< + std::is_function<typename std::remove_pointer<T>::type>::value, + char*>::type +CheckOpValueStr(const T& v) { + return CheckOpValueStr(reinterpret_cast<const void*>(v)); +} + +// We need overloads for enums that don't support operator<<. +// (i.e. scoped enums where no operator<< overload was declared). +template <typename T> +inline typename std::enable_if< + !base::internal::SupportsOstreamOperator<const T&>::value && + std::is_enum<T>::value, + char*>::type +CheckOpValueStr(const T& v) { + return CheckOpValueStr( + static_cast<typename std::underlying_type<T>::type>(v)); +} + +// Captures the result of a CHECK_op and facilitates testing as a boolean. +class CheckOpResult { + public: + // An empty result signals success. + constexpr CheckOpResult() = default; + + // A non-success result. expr_str is something like "foo != bar". v1_str and + // v2_str are the stringified run-time values of foo and bar. Takes ownership + // of v1_str and v2_str. + BASE_EXPORT CheckOpResult(const char* expr_str, char* v1_str, char* v2_str); + + // Returns true if the check succeeded. 
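+  //
+  // Used by the CHECK_OP/DCHECK_OP macros below roughly as:
+  //
+  //   if (CheckOpResult true_if_passed = CheckEQImpl(x, y, "x == y"))
+  //     ;  // Passed: no message was allocated.
+  //   else
+  //     ... stream the failure message ...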
+ constexpr explicit operator bool() const { return !message_; } + + friend class CheckError; + + private: + char* message_ = nullptr; +}; + +#if defined(OFFICIAL_BUILD) && defined(NDEBUG) + +// Discard log strings to reduce code bloat. +#define CHECK_OP(name, op, val1, val2) CHECK((val1)op(val2)) + +#else + +// Helper macro for binary operators. +// The 'switch' is used to prevent the 'else' from being ambiguous when the +// macro is used in an 'if' clause such as: +// if (a == 1) +// CHECK_EQ(2, a); +#define CHECK_OP(name, op, val1, val2) \ + switch (0) \ + case 0: \ + default: \ + if (::logging::CheckOpResult true_if_passed = \ + ::logging::Check##name##Impl((val1), (val2), \ + #val1 " " #op " " #val2)) \ + ; \ + else \ + ::logging::CheckError::CheckOp(__FILE__, __LINE__, &true_if_passed) \ + .stream() + +#endif + +// The int-int overload avoids address-taking static int members. +#define DEFINE_CHECK_OP_IMPL(name, op) \ + template <typename T, typename U> \ + constexpr ::logging::CheckOpResult Check##name##Impl( \ + const T& v1, const U& v2, const char* expr_str) { \ + if (ANALYZER_ASSUME_TRUE(v1 op v2)) \ + return ::logging::CheckOpResult(); \ + return ::logging::CheckOpResult(expr_str, CheckOpValueStr(v1), \ + CheckOpValueStr(v2)); \ + } \ + constexpr ::logging::CheckOpResult Check##name##Impl(int v1, int v2, \ + const char* expr_str) { \ + if (ANALYZER_ASSUME_TRUE(v1 op v2)) \ + return ::logging::CheckOpResult(); \ + return ::logging::CheckOpResult(expr_str, CheckOpValueStr(v1), \ + CheckOpValueStr(v2)); \ + } + +// clang-format off +DEFINE_CHECK_OP_IMPL(EQ, ==) +DEFINE_CHECK_OP_IMPL(NE, !=) +DEFINE_CHECK_OP_IMPL(LE, <=) +DEFINE_CHECK_OP_IMPL(LT, < ) +DEFINE_CHECK_OP_IMPL(GE, >=) +DEFINE_CHECK_OP_IMPL(GT, > ) +#undef DEFINE_CHECK_OP_IMPL +#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2) +#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2) +#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2) +#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2) +#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2) +#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2) +// clang-format on + +#if DCHECK_IS_ON() + +#define DCHECK_OP(name, op, val1, val2) \ + switch (0) \ + case 0: \ + default: \ + if (::logging::CheckOpResult true_if_passed = \ + ::logging::Check##name##Impl((val1), (val2), \ + #val1 " " #op " " #val2)) \ + ; \ + else \ + ::logging::CheckError::DCheckOp(__FILE__, __LINE__, &true_if_passed) \ + .stream() + +#else + +// Don't do any evaluation but still reference the same stuff as when enabled. +#define DCHECK_OP(name, op, val1, val2) \ + EAT_CHECK_STREAM_PARAMS((::logging::CheckOpValueStr(val1), \ + ::logging::CheckOpValueStr(val2), (val1)op(val2))) + +#endif + +// clang-format off +#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2) +#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2) +#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2) +#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2) +#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2) +#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2) +// clang-format on + +} // namespace logging + +#endif // BASE_CHECK_OP_H_ diff --git a/chromium/base/check_unittest.cc b/chromium/base/check_unittest.cc new file mode 100644 index 00000000000..0ae92fc87e1 --- /dev/null +++ b/chromium/base/check_unittest.cc @@ -0,0 +1,481 @@ +// Copyright (c) 2020 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/bind.h" +#include "base/callback.h" +#include "base/logging.h" +#include "base/strings/string_piece.h" +#include "base/test/gtest_util.h" +#include "base/test/scoped_feature_list.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +// Helper class which expects a check to fire with a certain location and +// message before the end of the current scope. +class ScopedCheckExpectation { + public: + ScopedCheckExpectation(const char* file, int line, std::string msg) + : file_(file), + line_(line), + msg_(msg), + assert_handler_(base::BindRepeating(&ScopedCheckExpectation::Check, + base::Unretained(this))), + fired_(false) {} + ~ScopedCheckExpectation() { + EXPECT_TRUE(fired_) << "CHECK at " << file_ << ":" << line_ + << " never fired!"; + } + + private: + void Check(const char* file, + int line, + const base::StringPiece msg, + const base::StringPiece stack) { + fired_ = true; + EXPECT_EQ(file, file_); + EXPECT_EQ(line, line_); + if (msg_.find("=~") == 0) { + EXPECT_THAT(std::string(msg), testing::MatchesRegex(msg_.substr(2))); + } else { + EXPECT_EQ(std::string(msg), msg_); + } + } + + std::string file_; + int line_; + std::string msg_; + logging::ScopedLogAssertHandler assert_handler_; + bool fired_; +}; + +// Macro which expects a CHECK to fire with a certain message. If msg starts +// with "=~", it's interpreted as a regular expression. +// Example: EXPECT_CHECK("Check failed: false.", CHECK(false)); +#if defined(OFFICIAL_BUILD) && defined(NDEBUG) +#define EXPECT_CHECK(msg, check_expr) \ + do { \ + EXPECT_CHECK_DEATH(check_expr); \ + } while (0) +#else +#define EXPECT_CHECK(msg, check_expr) \ + do { \ + ScopedCheckExpectation check_exp(__FILE__, __LINE__, msg); \ + check_expr; \ + } while (0) +#endif + +// Macro which expects a DCHECK to fire if DCHECKs are enabled. +#define EXPECT_DCHECK(msg, check_expr) \ + do { \ + if (DCHECK_IS_ON() && logging::LOG_DCHECK == logging::LOG_FATAL) { \ + ScopedCheckExpectation check_exp(__FILE__, __LINE__, msg); \ + check_expr; \ + } else { \ + check_expr; \ + } \ + } while (0) + +class CheckTest : public testing::Test {}; + +TEST_F(CheckTest, Basics) { + EXPECT_CHECK("Check failed: false. ", CHECK(false)); + + EXPECT_CHECK("Check failed: false. foo", CHECK(false) << "foo"); + + double a = 2, b = 1; + EXPECT_CHECK("Check failed: a < b (2.000000 vs. 1.000000)", CHECK_LT(a, b)); + + EXPECT_CHECK("Check failed: a < b (2.000000 vs. 1.000000)foo", + CHECK_LT(a, b) << "foo"); +} + +TEST_F(CheckTest, PCheck) { + const char file[] = "/nonexistentfile123"; + ignore_result(fopen(file, "r")); + std::string err = + logging::SystemErrorCodeToString(logging::GetLastSystemErrorCode()); + + EXPECT_CHECK( + "Check failed: fopen(file, \"r\") != nullptr." + " : " + + err, + PCHECK(fopen(file, "r") != nullptr)); + + EXPECT_CHECK( + "Check failed: fopen(file, \"r\") != nullptr." + " foo: " + + err, + PCHECK(fopen(file, "r") != nullptr) << "foo"); + + EXPECT_DCHECK( + "Check failed: fopen(file, \"r\") != nullptr." + " : " + + err, + DPCHECK(fopen(file, "r") != nullptr)); + + EXPECT_DCHECK( + "Check failed: fopen(file, \"r\") != nullptr." + " foo: " + + err, + DPCHECK(fopen(file, "r") != nullptr) << "foo"); +} + +TEST_F(CheckTest, CheckOp) { + int a = 1, b = 2; + // clang-format off + EXPECT_CHECK("Check failed: a == b (1 vs. 2)", CHECK_EQ(a, b)); + EXPECT_CHECK("Check failed: a != a (1 vs. 
1)", CHECK_NE(a, a)); + EXPECT_CHECK("Check failed: b <= a (2 vs. 1)", CHECK_LE(b, a)); + EXPECT_CHECK("Check failed: b < a (2 vs. 1)", CHECK_LT(b, a)); + EXPECT_CHECK("Check failed: a >= b (1 vs. 2)", CHECK_GE(a, b)); + EXPECT_CHECK("Check failed: a > b (1 vs. 2)", CHECK_GT(a, b)); + + EXPECT_DCHECK("Check failed: a == b (1 vs. 2)", DCHECK_EQ(a, b)); + EXPECT_DCHECK("Check failed: a != a (1 vs. 1)", DCHECK_NE(a, a)); + EXPECT_DCHECK("Check failed: b <= a (2 vs. 1)", DCHECK_LE(b, a)); + EXPECT_DCHECK("Check failed: b < a (2 vs. 1)", DCHECK_LT(b, a)); + EXPECT_DCHECK("Check failed: a >= b (1 vs. 2)", DCHECK_GE(a, b)); + EXPECT_DCHECK("Check failed: a > b (1 vs. 2)", DCHECK_GT(a, b)); + // clang-format on +} + +TEST_F(CheckTest, CheckStreamsAreLazy) { + int called_count = 0; + int not_called_count = 0; + + auto Called = [&]() { + ++called_count; + return 42; + }; + auto NotCalled = [&]() { + ++not_called_count; + return 42; + }; + + CHECK(Called()) << NotCalled(); + CHECK_EQ(Called(), Called()) << NotCalled(); + PCHECK(Called()) << NotCalled(); + + DCHECK(Called()) << NotCalled(); + DCHECK_EQ(Called(), Called()) << NotCalled(); + DPCHECK(Called()) << NotCalled(); + + EXPECT_EQ(not_called_count, 0); +#if DCHECK_IS_ON() + EXPECT_EQ(called_count, 8); +#else + EXPECT_EQ(called_count, 4); +#endif +} + +void DcheckEmptyFunction1() { + // Provide a body so that Release builds do not cause the compiler to + // optimize DcheckEmptyFunction1 and DcheckEmptyFunction2 as a single + // function, which breaks the Dcheck tests below. + LOG(INFO) << "DcheckEmptyFunction1"; +} +void DcheckEmptyFunction2() {} + +#if defined(DCHECK_IS_CONFIGURABLE) +class ScopedDcheckSeverity { + public: + ScopedDcheckSeverity(logging::LogSeverity new_severity) + : old_severity_(logging::LOG_DCHECK) { + logging::LOG_DCHECK = new_severity; + } + + ~ScopedDcheckSeverity() { logging::LOG_DCHECK = old_severity_; } + + private: + logging::LogSeverity old_severity_; +}; +#endif // defined(DCHECK_IS_CONFIGURABLE) + +// https://crbug.com/709067 tracks test flakiness on iOS. +#if defined(OS_IOS) +#define MAYBE_Dcheck DISABLED_Dcheck +#else +#define MAYBE_Dcheck Dcheck +#endif +TEST_F(CheckTest, MAYBE_Dcheck) { +#if defined(DCHECK_IS_CONFIGURABLE) + // DCHECKs are enabled, and LOG_DCHECK is mutable, but defaults to non-fatal. + // Set it to LOG_FATAL to get the expected behavior from the rest of this + // test. + ScopedDcheckSeverity dcheck_severity(logging::LOG_FATAL); +#endif // defined(DCHECK_IS_CONFIGURABLE) + +#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) + // Release build. + EXPECT_FALSE(DCHECK_IS_ON()); + EXPECT_FALSE(DLOG_IS_ON(DCHECK)); +#elif defined(NDEBUG) && defined(DCHECK_ALWAYS_ON) + // Release build with real DCHECKS. + EXPECT_TRUE(DCHECK_IS_ON()); + EXPECT_TRUE(DLOG_IS_ON(DCHECK)); +#else + // Debug build. + EXPECT_TRUE(DCHECK_IS_ON()); + EXPECT_TRUE(DLOG_IS_ON(DCHECK)); +#endif + + EXPECT_DCHECK("Check failed: false. ", DCHECK(false)); + std::string err = + logging::SystemErrorCodeToString(logging::GetLastSystemErrorCode()); + EXPECT_DCHECK("Check failed: false. : " + err, DPCHECK(false)); + EXPECT_DCHECK("Check failed: 0 == 1 (0 vs. 1)", DCHECK_EQ(0, 1)); + + // Test DCHECK on std::nullptr_t + const void* p_null = nullptr; + const void* p_not_null = &p_null; + DCHECK_EQ(p_null, nullptr); + DCHECK_EQ(nullptr, p_null); + DCHECK_NE(p_not_null, nullptr); + DCHECK_NE(nullptr, p_not_null); + + // Test DCHECK on a scoped enum. 
+ enum class Animal { DOG, CAT }; + DCHECK_EQ(Animal::DOG, Animal::DOG); + EXPECT_DCHECK("Check failed: Animal::DOG == Animal::CAT (0 vs. 1)", + DCHECK_EQ(Animal::DOG, Animal::CAT)); + + // Test DCHECK on functions and function pointers. + struct MemberFunctions { + void MemberFunction1() { + // See the comment in DcheckEmptyFunction1(). + LOG(INFO) << "Do not merge with MemberFunction2."; + } + void MemberFunction2() {} + }; + void (MemberFunctions::*mp1)() = &MemberFunctions::MemberFunction1; + void (MemberFunctions::*mp2)() = &MemberFunctions::MemberFunction2; + void (*fp1)() = DcheckEmptyFunction1; + void (*fp2)() = DcheckEmptyFunction2; + void (*fp3)() = DcheckEmptyFunction1; + DCHECK_EQ(fp1, fp3); + DCHECK_EQ(mp1, &MemberFunctions::MemberFunction1); + DCHECK_EQ(mp2, &MemberFunctions::MemberFunction2); + EXPECT_DCHECK("=~Check failed: fp1 == fp2 \\(\\w+ vs. \\w+\\)", + DCHECK_EQ(fp1, fp2)); + EXPECT_DCHECK( + "Check failed: mp2 == &MemberFunctions::MemberFunction1 (1 vs. 1)", + DCHECK_EQ(mp2, &MemberFunctions::MemberFunction1)); +} + +TEST_F(CheckTest, DcheckReleaseBehavior) { + int var1 = 1; + int var2 = 2; + int var3 = 3; + int var4 = 4; + + // No warnings about unused variables even though no check fires and DCHECK + // may or may not be enabled. + DCHECK(var1) << var2; + DPCHECK(var1) << var3; + DCHECK_EQ(var1, 1) << var4; +} + +TEST_F(CheckTest, DCheckEqStatements) { + bool reached = false; + if (false) + DCHECK_EQ(false, true); // Unreached. + else + DCHECK_EQ(true, reached = true); // Reached, passed. + ASSERT_EQ(DCHECK_IS_ON() ? true : false, reached); + + if (false) + DCHECK_EQ(false, true); // Unreached. +} + +TEST_F(CheckTest, CheckEqStatements) { + bool reached = false; + if (false) + CHECK_EQ(false, true); // Unreached. + else + CHECK_EQ(true, reached = true); // Reached, passed. + ASSERT_TRUE(reached); + + if (false) + CHECK_EQ(false, true); // Unreached. +} + +#if defined(DCHECK_IS_CONFIGURABLE) +TEST_F(CheckTest, ConfigurableDCheck) { + // Verify that DCHECKs default to non-fatal in configurable-DCHECK builds. + // Note that we require only that DCHECK is non-fatal by default, rather + // than requiring that it be exactly INFO, ERROR, etc level. + EXPECT_LT(logging::LOG_DCHECK, logging::LOG_FATAL); + DCHECK(false); + + // Verify that DCHECK* aren't hard-wired to crash on failure. + logging::LOG_DCHECK = logging::LOG_INFO; + DCHECK(false); + DCHECK_EQ(1, 2); + + // Verify that DCHECK does crash if LOG_DCHECK is set to LOG_FATAL. + logging::LOG_DCHECK = logging::LOG_FATAL; + EXPECT_CHECK("Check failed: false. ", DCHECK(false)); + EXPECT_CHECK("Check failed: 1 == 2 (1 vs. 2)", DCHECK_EQ(1, 2)); +} + +TEST_F(CheckTest, ConfigurableDCheckFeature) { + // Initialize FeatureList with and without DcheckIsFatal, and verify the + // value of LOG_DCHECK. Note that we don't require that DCHECK take a + // specific value when the feature is off, only that it is non-fatal. + + { + base::test::ScopedFeatureList feature_list; + feature_list.InitFromCommandLine("DcheckIsFatal", ""); + EXPECT_EQ(logging::LOG_DCHECK, logging::LOG_FATAL); + } + + { + base::test::ScopedFeatureList feature_list; + feature_list.InitFromCommandLine("", "DcheckIsFatal"); + EXPECT_LT(logging::LOG_DCHECK, logging::LOG_FATAL); + } + + // The default case is last, so we leave LOG_DCHECK in the default state. 
+ { + base::test::ScopedFeatureList feature_list; + feature_list.InitFromCommandLine("", ""); + EXPECT_LT(logging::LOG_DCHECK, logging::LOG_FATAL); + } +} +#endif // defined(DCHECK_IS_CONFIGURABLE) + +struct StructWithOstream { + bool operator==(const StructWithOstream& o) const { return &o == this; } +}; +#if !(defined(OFFICIAL_BUILD) && defined(NDEBUG)) +std::ostream& operator<<(std::ostream& out, const StructWithOstream&) { + return out << "ostream"; +} +#endif + +struct StructWithToString { + bool operator==(const StructWithToString& o) const { return &o == this; } + std::string ToString() const { return "ToString"; } +}; + +struct StructWithToStringAndOstream { + bool operator==(const StructWithToStringAndOstream& o) const { + return &o == this; + } + std::string ToString() const { return "ToString"; } +}; +#if !(defined(OFFICIAL_BUILD) && defined(NDEBUG)) +std::ostream& operator<<(std::ostream& out, + const StructWithToStringAndOstream&) { + return out << "ostream"; +} +#endif + +struct StructWithToStringNotStdString { + struct PseudoString {}; + + bool operator==(const StructWithToStringNotStdString& o) const { + return &o == this; + } + PseudoString ToString() const { return PseudoString(); } +}; +#if !(defined(OFFICIAL_BUILD) && defined(NDEBUG)) +std::ostream& operator<<(std::ostream& out, + const StructWithToStringNotStdString::PseudoString&) { + return out << "ToString+ostream"; +} +#endif + +TEST_F(CheckTest, OstreamVsToString) { + StructWithOstream a, b; + EXPECT_CHECK("Check failed: a == b (ostream vs. ostream)", CHECK_EQ(a, b)); + + StructWithToString c, d; + EXPECT_CHECK("Check failed: c == d (ToString vs. ToString)", CHECK_EQ(c, d)); + + StructWithToStringAndOstream e, f; + EXPECT_CHECK("Check failed: e == f (ostream vs. ostream)", CHECK_EQ(e, f)); + + StructWithToStringNotStdString g, h; + EXPECT_CHECK("Check failed: g == h (ToString+ostream vs. ToString+ostream)", + CHECK_EQ(g, h)); +} + +#define EXPECT_LOG_ERROR(msg, expr, expected_line) \ + do { \ + static bool got_log_message = false; \ + ASSERT_EQ(logging::GetLogMessageHandler(), nullptr); \ + logging::SetLogMessageHandler([](int severity, const char* file, int line, \ + size_t message_start, \ + const std::string& str) { \ + EXPECT_FALSE(got_log_message); \ + got_log_message = true; \ + EXPECT_EQ(severity, logging::LOG_ERROR); \ + EXPECT_EQ(str.substr(message_start), (msg)); \ + EXPECT_STREQ(__FILE__, file); \ + EXPECT_EQ(expected_line, line); \ + return true; \ + }); \ + expr; \ + EXPECT_TRUE(got_log_message); \ + logging::SetLogMessageHandler(nullptr); \ + } while (0) + +#define EXPECT_NO_LOG(expr) \ + do { \ + ASSERT_EQ(logging::GetLogMessageHandler(), nullptr); \ + logging::SetLogMessageHandler([](int severity, const char* file, int line, \ + size_t message_start, \ + const std::string& str) { \ + EXPECT_TRUE(false) << "Unexpected log: " << str; \ + return true; \ + }); \ + expr; \ + logging::SetLogMessageHandler(nullptr); \ + } while (0) + +TEST_F(CheckTest, NotReached) { +#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED) + // Expect LOG(ERROR) without the streamed params. + EXPECT_LOG_ERROR("NOTREACHED() hit.\n", NOTREACHED() << "foo", __LINE__); +#else + // Expect a DCHECK with streamed params intact. + EXPECT_DCHECK("Check failed: false. foo", NOTREACHED() << "foo"); +#endif +} + +TEST_F(CheckTest, NotImplemented) { + static const std::string expected_msg = + std::string("Not implemented reached in ") + __PRETTY_FUNCTION__; + +#if DCHECK_IS_ON() + // Expect LOG(ERROR) with streamed params intact. 
+ EXPECT_LOG_ERROR(expected_msg + "foo\n", NOTIMPLEMENTED() << "foo", __LINE__); +#else + // Expect nothing. + EXPECT_NO_LOG(NOTIMPLEMENTED() << "foo"); +#endif +} + +void NiLogOnce() { + // Note: The stream param is not logged. + NOTIMPLEMENTED_LOG_ONCE() << "foo"; +} + +TEST_F(CheckTest, NotImplementedLogOnce) { + static const std::string expected_msg = + "Not implemented reached in void (anonymous namespace)::NiLogOnce()\n"; + +#if DCHECK_IS_ON() + EXPECT_LOG_ERROR(expected_msg, NiLogOnce(), __LINE__ - 8); + EXPECT_NO_LOG(NiLogOnce()); +#else + EXPECT_NO_LOG(NiLogOnce()); + EXPECT_NO_LOG(NiLogOnce()); +#endif +} + +} // namespace diff --git a/chromium/base/compiler_specific.h b/chromium/base/compiler_specific.h index 29625375fbf..c8a76491e49 100644 --- a/chromium/base/compiler_specific.h +++ b/chromium/base/compiler_specific.h @@ -47,6 +47,20 @@ #define ALWAYS_INLINE inline #endif +// Annotate a function indicating it should never be tail called. Useful to make +// sure callers of the annotated function are never omitted from call-stacks. +// To provide the complementary behavior (prevent the annotated function from +// being omitted) look at NOINLINE. Also note that this doesn't prevent code +// folding of multiple identical caller functions into a single signature. To +// prevent code folding, see base::debug::Alias. +// Use like: +// void NOT_TAIL_CALLED FooBar(); +#if defined(__clang__) && __has_attribute(not_tail_called) +#define NOT_TAIL_CALLED __attribute__((not_tail_called)) +#else +#define NOT_TAIL_CALLED +#endif + // Specify memory alignment for structs, classes, etc. // Use like: // class ALIGNAS(16) MyClass { ... } @@ -247,4 +261,35 @@ #define STACK_UNINITIALIZED #endif +// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints +// to Clang which control what code paths are statically analyzed, +// and is meant to be used in conjunction with assert & assert-like functions. +// The expression is passed straight through if analysis isn't enabled. +// +// ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current +// codepath and any other branching codepaths that might follow. +#if defined(__clang_analyzer__) + +inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) { + return false; +} + +inline constexpr bool AnalyzerAssumeTrue(bool arg) { + // AnalyzerNoReturn() is invoked and analysis is terminated if |arg| is + // false. 
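// Illustrative sketch only; MY_ASSERT is a hypothetical macro, not part of
// base. An assert-style macro can route its condition through
// ANALYZER_ASSUME_TRUE so the analyzer stops exploring the failure path while
// regular builds see just the plain condition:
//
//   #define MY_ASSERT(condition)              \
//     do {                                    \
//       if (!ANALYZER_ASSUME_TRUE(condition)) \
//         ::abort();                          \
//     } while (0)
//
// After MY_ASSERT(ptr != nullptr), the analyzer treats |ptr| as non-null on
// the surviving path.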
+ return arg || AnalyzerNoReturn(); +} + +#define ANALYZER_ASSUME_TRUE(arg) ::AnalyzerAssumeTrue(!!(arg)) +#define ANALYZER_SKIP_THIS_PATH() static_cast<void>(::AnalyzerNoReturn()) +#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var); + +#else // !defined(__clang_analyzer__) + +#define ANALYZER_ASSUME_TRUE(arg) (arg) +#define ANALYZER_SKIP_THIS_PATH() +#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var); + +#endif // defined(__clang_analyzer__) + #endif // BASE_COMPILER_SPECIFIC_H_ diff --git a/chromium/base/containers/checked_iterators.h b/chromium/base/containers/checked_iterators.h index 66d927d2465..986f6fad102 100644 --- a/chromium/base/containers/checked_iterators.h +++ b/chromium/base/containers/checked_iterators.h @@ -28,6 +28,69 @@ class CheckedContiguousIterator { friend class CheckedContiguousIterator; constexpr CheckedContiguousIterator() = default; + +#if defined(_LIBCPP_VERSION) + // The following using declaration, single argument implicit constructor and + // friended `__unwrap_iter` overload are required to use an optimized code + // path when using a CheckedContiguousIterator with libc++ algorithms such as + // std::copy(first, last, result), std::copy_backward(first, last, result), + // std::move(first, last, result) and std::move_backward(first, last, result). + // + // Each of these algorithms dispatches to a std::memmove if this is safe to do + // so, i.e. when all of `first`, `last` and `result` are iterators over + // contiguous storage of the same type modulo const qualifiers. + // + // libc++ implements this for its contiguous iterators by invoking the + // unqualified __unwrap_iter, which returns the underlying pointer for + // iterators over std::vector and std::string, and returns the original + // iterator otherwise. + // + // Thus in order to opt into this optimization for CCI, we need to provide our + // own __unwrap_iter, returning the underlying raw pointer if it is safe to do + // so. + // + // Furthermore, considering that std::copy is implemented as follows, the + // return type of __unwrap_iter(CCI) needs to be convertible to CCI, which is + // why an appropriate implicit single argument constructor is provided for the + // optimized case: + // + // template <class InIter, class OutIter> + // OutIter copy(InIter first, InIter last, OutIter result) { + // return __copy(__unwrap_iter(first), __unwrap_iter(last), + // __unwrap_iter(result)); + // } + // + // Unoptimized __copy() signature: + // template <class InIter, class OutIter> + // OutIter __copy(InIter first, InIter last, OutIter result); + // + // Optimized __copy() signature: + // template <class T, class U> + // U* __copy(T* first, T* last, U* result); + // + // Finally, this single argument constructor sets all internal fields to the + // passed in pointer. This allows the resulting CCI to be used in other + // optimized calls to std::copy (or std::move, std::copy_backward, + // std::move_backward). However, it should not be used otherwise, since + // invoking any of its public API will result in a CHECK failure. This also + // means that callers should never use the single argument constructor + // directly. + template <typename U> + using PtrIfSafeToMemmove = std::enable_if_t< + std::is_trivially_copy_assignable<std::remove_const_t<U>>::value, + U*>; + + template <int&... ExplicitArgumentBarrier, typename U = T> + constexpr CheckedContiguousIterator(PtrIfSafeToMemmove<U> ptr) + : start_(ptr), current_(ptr), end_(ptr) {} + + template <int&... 
ExplicitArgumentBarrier, typename U = T>
+  friend constexpr PtrIfSafeToMemmove<U> __unwrap_iter(
+      CheckedContiguousIterator iter) {
+    return iter.current_;
+  }
+#endif
+
 constexpr CheckedContiguousIterator(T* start, const T* end)
     : CheckedContiguousIterator(start, start, end) {}
 constexpr CheckedContiguousIterator(const T* start, T* current, const T* end)
diff --git a/chromium/base/containers/checked_iterators_unittest.cc b/chromium/base/containers/checked_iterators_unittest.cc
index 9004a020b52..b4a44322d3d 100644
--- a/chromium/base/containers/checked_iterators_unittest.cc
+++ b/chromium/base/containers/checked_iterators_unittest.cc
@@ -4,6 +4,9 @@
 #include "base/containers/checked_iterators.h"
 
+#include <algorithm>
+#include <iterator>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -77,4 +80,80 @@ TEST(CheckedContiguousIterator, ConvertingComparisonOperators) {
   EXPECT_GE(cbegin, begin);
 }
 
+#if defined(_LIBCPP_VERSION)
+namespace {
+
+// Helper template that wraps an iterator and disables its dereference and
+// increment operations.
+template <typename Iterator>
+struct DisableDerefAndIncr : Iterator {
+  using Iterator::Iterator;
+
+  void operator*() = delete;
+  void operator++() = delete;
+  void operator++(int) = delete;
+};
+
+template <typename Iterator>
+auto __unwrap_iter(DisableDerefAndIncr<Iterator> iter) {
+  return __unwrap_iter(static_cast<Iterator>(iter));
+}
+
+}  // namespace
+
+// Tests that using std::copy with CheckedContiguousIterator<int> results in an
+// optimized code-path that does not invoke the iterator's dereference and
+// increment operations. This would fail to compile if std::copy were not
+// optimized.
+TEST(CheckedContiguousIterator, OptimizedCopy) {
+  using Iter = DisableDerefAndIncr<CheckedContiguousIterator<int>>;
+  static_assert(std::is_same<int*, decltype(__unwrap_iter(Iter()))>::value,
+                "Error: Iter should unwrap to int*");
+
+  int arr_in[5] = {1, 2, 3, 4, 5};
+  int arr_out[5];
+
+  Iter begin(std::begin(arr_in), std::end(arr_in));
+  Iter end(std::begin(arr_in), std::end(arr_in), std::end(arr_in));
+  std::copy(begin, end, arr_out);
+
+  EXPECT_TRUE(std::equal(std::begin(arr_in), std::end(arr_in),
+                         std::begin(arr_out), std::end(arr_out)));
+}
+
+TEST(CheckedContiguousIterator, UnwrapIter) {
+  static_assert(
+      std::is_same<int*, decltype(__unwrap_iter(
+                             CheckedContiguousIterator<int>()))>::value,
+      "Error: CCI<int> should unwrap to int*");
+
+  static_assert(
+      std::is_same<CheckedContiguousIterator<std::string>,
+                   decltype(__unwrap_iter(
+                       CheckedContiguousIterator<std::string>()))>::value,
+      "Error: CCI<std::string> should unwrap to CCI<std::string>");
+}
+
+// While the result of std::copying into a range via a CCI can't be
+// compared to other iterators, it should be possible to re-use it in another
+// std::copy expression.
+TEST(CheckedContiguousIterator, ReuseCopyIter) { + using Iter = CheckedContiguousIterator<int>; + + int arr_in[5] = {1, 2, 3, 4, 5}; + int arr_out[5]; + + Iter begin(std::begin(arr_in), std::end(arr_in)); + Iter end(std::begin(arr_in), std::end(arr_in), std::end(arr_in)); + Iter out_begin(std::begin(arr_out), std::end(arr_out)); + + auto out_middle = std::copy_n(begin, 3, out_begin); + std::copy(begin + 3, end, out_middle); + + EXPECT_TRUE(std::equal(std::begin(arr_in), std::end(arr_in), + std::begin(arr_out), std::end(arr_out))); +} + +#endif + } // namespace base diff --git a/chromium/base/containers/circular_deque.h b/chromium/base/containers/circular_deque.h index 07476d29ce5..6c2c3a885a6 100644 --- a/chromium/base/containers/circular_deque.h +++ b/chromium/base/containers/circular_deque.h @@ -522,11 +522,11 @@ class circular_deque { return buffer_[i - right_size]; } value_type& at(size_type i) { - return const_cast<value_type&>(as_const(*this).at(i)); + return const_cast<value_type&>(base::as_const(*this).at(i)); } value_type& operator[](size_type i) { - return const_cast<value_type&>(as_const(*this)[i]); + return const_cast<value_type&>(base::as_const(*this)[i]); } const value_type& operator[](size_type i) const { return at(i); } diff --git a/chromium/base/containers/intrusive_heap.cc b/chromium/base/containers/intrusive_heap.cc index e9c948a53cd..758ae0fe674 100644 --- a/chromium/base/containers/intrusive_heap.cc +++ b/chromium/base/containers/intrusive_heap.cc @@ -4,7 +4,7 @@ #include "base/containers/intrusive_heap.h" -#include "base/logging.h" +#include "base/check.h" #include "base/memory/ptr_util.h" namespace base { diff --git a/chromium/base/containers/intrusive_heap_unittest.cc b/chromium/base/containers/intrusive_heap_unittest.cc index 65aee84f972..85b98ab78fc 100644 --- a/chromium/base/containers/intrusive_heap_unittest.cc +++ b/chromium/base/containers/intrusive_heap_unittest.cc @@ -4,7 +4,9 @@ #include "base/containers/intrusive_heap.h" +#include "base/check_op.h" #include "base/memory/ptr_util.h" +#include "base/notreached.h" #include "base/rand_util.h" #include "base/stl_util.h" #include "testing/gmock/include/gmock/gmock.h" diff --git a/chromium/base/cpu.cc b/chromium/base/cpu.cc index 8cc2028a5f8..d2c197e8a80 100644 --- a/chromium/base/cpu.cc +++ b/chromium/base/cpu.cc @@ -10,6 +10,7 @@ #include <string.h> #include <algorithm> +#include <sstream> #include <utility> #include "base/stl_util.h" diff --git a/chromium/base/critical_closure.h b/chromium/base/critical_closure.h index 5017d19992e..0eb6a3e4445 100644 --- a/chromium/base/critical_closure.h +++ b/chromium/base/critical_closure.h @@ -9,6 +9,7 @@ #include "base/callback.h" #include "base/macros.h" +#include "base/strings/string_piece.h" #include "build/build_config.h" #if defined(OS_IOS) @@ -29,7 +30,7 @@ bool IsMultiTaskingSupported(); // |ios::ScopedCriticalAction|. class CriticalClosure { public: - explicit CriticalClosure(OnceClosure closure); + explicit CriticalClosure(StringPiece task_name, OnceClosure closure); ~CriticalClosure(); void Run(); @@ -51,20 +52,24 @@ class CriticalClosure { // Example: // file_task_runner_->PostTask( // FROM_HERE, -// MakeCriticalClosure(base::BindOnce(&WriteToDiskTask, path_, data))); +// MakeCriticalClosure(task_name, +// base::BindOnce(&WriteToDiskTask, path_, data))); // // Note new closures might be posted in this closure. If the new closures need // background running time, |MakeCriticalClosure| should be applied on them -// before posting. +// before posting. 
|task_name| is used by the platform to identify any tasks +// that do not complete in time for suspension. #if defined(OS_IOS) -inline OnceClosure MakeCriticalClosure(OnceClosure closure) { +inline OnceClosure MakeCriticalClosure(StringPiece task_name, + OnceClosure closure) { DCHECK(internal::IsMultiTaskingSupported()); return base::BindOnce( &internal::CriticalClosure::Run, - Owned(new internal::CriticalClosure(std::move(closure)))); + Owned(new internal::CriticalClosure(task_name, std::move(closure)))); } #else // defined(OS_IOS) -inline OnceClosure MakeCriticalClosure(OnceClosure closure) { +inline OnceClosure MakeCriticalClosure(StringPiece task_name, + OnceClosure closure) { // No-op for platforms where the application does not need to acquire // background time for closures to finish when it goes into the background. return closure; diff --git a/chromium/base/critical_closure_internal_ios.mm b/chromium/base/critical_closure_internal_ios.mm index e35eca0c7e0..8e79698b8f5 100644 --- a/chromium/base/critical_closure_internal_ios.mm +++ b/chromium/base/critical_closure_internal_ios.mm @@ -13,8 +13,8 @@ bool IsMultiTaskingSupported() { return [[UIDevice currentDevice] isMultitaskingSupported]; } -CriticalClosure::CriticalClosure(OnceClosure closure) - : closure_(std::move(closure)) {} +CriticalClosure::CriticalClosure(StringPiece task_name, OnceClosure closure) + : critical_action_(task_name), closure_(std::move(closure)) {} CriticalClosure::~CriticalClosure() {} diff --git a/chromium/base/debug/activity_analyzer.cc b/chromium/base/debug/activity_analyzer.cc index ea754e452aa..143174a5678 100644 --- a/chromium/base/debug/activity_analyzer.cc +++ b/chromium/base/debug/activity_analyzer.cc @@ -7,10 +7,10 @@ #include <algorithm> #include <utility> +#include "base/check_op.h" #include "base/files/file.h" #include "base/files/file_path.h" #include "base/files/memory_mapped_file.h" -#include "base/logging.h" #include "base/metrics/histogram_functions.h" #include "base/metrics/histogram_macros.h" #include "base/no_destructor.h" diff --git a/chromium/base/debug/alias.cc b/chromium/base/debug/alias.cc index f808c503454..1b218cd57e4 100644 --- a/chromium/base/debug/alias.cc +++ b/chromium/base/debug/alias.cc @@ -3,7 +3,15 @@ // found in the LICENSE file. #include "base/debug/alias.h" -#include "build/build_config.h" + +// This is a widely included header and its size has significant impact on +// build time. Try not to raise this limit unless absolutely necessary. See +// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md +#ifndef NACL_TC_REV +#pragma clang max_tokens_here 250 +#endif + +#include "base/compiler_specific.h" namespace base { namespace debug { diff --git a/chromium/base/debug/alias.h b/chromium/base/debug/alias.h index 35af2c23bf2..cdd2372ea87 100644 --- a/chromium/base/debug/alias.h +++ b/chromium/base/debug/alias.h @@ -5,40 +5,80 @@ #ifndef BASE_DEBUG_ALIAS_H_ #define BASE_DEBUG_ALIAS_H_ +#include <stddef.h> + #include "base/base_export.h" -#include "base/stl_util.h" -#include "base/strings/string_util.h" namespace base { namespace debug { -// Make the optimizer think that var is aliased. This is to prevent it from -// optimizing out local variables that would not otherwise be live at the point -// of a potential crash. -// base::debug::Alias should only be used for local variables, not globals, -// object members, or function return values - these must be copied to locals if -// you want to ensure they are recorded in crash dumps. 
-// Note that if the local variable is a pointer then its value will be retained
-// but the memory that it points to will probably not be saved in the crash
-// dump - by default only stack memory is saved. Therefore the aliasing
-// technique is usually only worthwhile with non-pointer variables. If you have
-// a pointer to an object and you want to retain the object's state you need to
-// copy the object or its fields to local variables. Example usage:
+// Make the optimizer think that |var| is aliased. This can be used to inhibit
+// three different kinds of optimizations:
+//
+// Case #1: Prevent a local variable from being optimized out if it would not
+// otherwise be live at the point of a potential crash. This can only be done
+// with local variables, not globals, object members, or function return values
+// - these must be copied to locals if you want to ensure they are recorded in
+// crash dumps. Note that if the local variable is a pointer then its value will
+// be retained but the memory that it points to will probably not be saved in
+// the crash dump - by default only stack memory is saved. Therefore the
+// aliasing technique is usually only worthwhile with non-pointer variables. If
+// you have a pointer to an object and you want to retain the object's state you
+// need to copy the object or its fields to local variables.
+//
+// Example usage:
 // int last_error = err_;
 // base::debug::Alias(&last_error);
 // DEBUG_ALIAS_FOR_CSTR(name_copy, p->name, 16);
 // CHECK(false);
+//
+// Case #2: Prevent a tail call into a function. This is useful to make sure the
+// function containing the call to base::debug::Alias() will be present in the
+// call stack. In this case there is no memory that needs to be on
+// the stack so we can use nullptr. The call to base::debug::Alias() needs to
+// happen after the call that is suspected to be tail called. Note: This
+// technique will prevent tail calls at the specific call site only. To prevent
+// them for all invocations of a function look at NOT_TAIL_CALLED.
+//
+// Example usage:
+//   NOINLINE void Foo(){
+//     ... code ...
+//
+//     Bar();
+//     base::debug::Alias(nullptr);
+//   }
+//
+// Case #3: Prevent code folding of a non-unique function. Code folding can
+// cause the same address to be assigned to different functions if they are
+// identical. If finding the precise signature of a function in the call-stack
+// is important and it's suspected the function is identical to other functions
+// it can be made unique using base::debug::Alias().
+//
+// Example usage:
+//   NOINLINE void Foo(){
+//     Bar();
+//     const int line_number = __LINE__;
+//     base::debug::Alias(&line_number);
+//   }
+//
+// Finally, please note that these effects compound. This means that saving a
+// stack variable (case #1) using base::debug::Alias() will also inhibit
+// tail calls for calls in earlier lines and prevent code folding.
+
 void BASE_EXPORT Alias(const void* var);
 
 } // namespace debug
+
+BASE_EXPORT size_t strlcpy(char* dst, const char* src, size_t dst_size);
+
 } // namespace base
 
 // Convenience macro that copies the null-terminated string from |c_str| into a
 // stack-allocated char array named |var_name| that holds up to |char_count|
 // characters and should be preserved in memory dumps.
-#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count) \ - char var_name[char_count]; \ - ::base::strlcpy(var_name, (c_str), base::size(var_name)); \ +#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count) \ + char var_name[char_count]; \ + ::base::strlcpy(var_name, (c_str), sizeof(var_name)); \ ::base::debug::Alias(var_name); #endif // BASE_DEBUG_ALIAS_H_ diff --git a/chromium/base/debug/asan_invalid_access.cc b/chromium/base/debug/asan_invalid_access.cc index f464bf6654f..4f3427c1632 100644 --- a/chromium/base/debug/asan_invalid_access.cc +++ b/chromium/base/debug/asan_invalid_access.cc @@ -8,8 +8,8 @@ #include <memory> +#include "base/check.h" #include "base/debug/alias.h" -#include "base/logging.h" #include "build/build_config.h" #if defined(OS_WIN) diff --git a/chromium/base/debug/debugger_posix.cc b/chromium/base/debug/debugger_posix.cc index 4a751773329..de383b80821 100644 --- a/chromium/base/debug/debugger_posix.cc +++ b/chromium/base/debug/debugger_posix.cc @@ -17,8 +17,11 @@ #include <memory> #include <vector> +#include "base/check_op.h" #include "base/clang_profiling_buildflags.h" +#include "base/notreached.h" #include "base/stl_util.h" +#include "base/strings/string_util.h" #include "base/threading/platform_thread.h" #include "base/time/time.h" #include "build/build_config.h" diff --git a/chromium/base/debug/dump_without_crashing.cc b/chromium/base/debug/dump_without_crashing.cc index 1ab8c9cc413..3e2fa17ca68 100644 --- a/chromium/base/debug/dump_without_crashing.cc +++ b/chromium/base/debug/dump_without_crashing.cc @@ -4,7 +4,7 @@ #include "base/debug/dump_without_crashing.h" -#include "base/logging.h" +#include "base/check.h" namespace { diff --git a/chromium/base/debug/dump_without_crashing.h b/chromium/base/debug/dump_without_crashing.h index 913f6c428d6..7c291be632b 100644 --- a/chromium/base/debug/dump_without_crashing.h +++ b/chromium/base/debug/dump_without_crashing.h @@ -24,7 +24,11 @@ namespace debug { // Mac/Linux: // Crashpad does this as part of crash_reporter::InitializeCrashpad. // Returns false if called before SetDumpWithoutCrashingFunction. -BASE_EXPORT bool DumpWithoutCrashing(); +// +// This function must not be called with a tail call because that would cause +// the caller to be omitted from the call stack in the crash dump, and that is +// confusing and omits what is likely the most important context. +BASE_EXPORT bool NOT_TAIL_CALLED DumpWithoutCrashing(); // Sets a function that'll be invoked to dump the current process when // DumpWithoutCrashing() is called. 
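The Alias() cases documented above and the NOT_TAIL_CALLED annotation on DumpWithoutCrashing() combine naturally at report sites. A minimal sketch of such a call site follows; Entry and ReportBadEntry() are hypothetical names used only for illustration and are not part of this change.

#include <string>

#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/debug/dump_without_crashing.h"

struct Entry {  // Hypothetical data being reported.
  int error_code;
  std::string name;
};

NOINLINE void ReportBadEntry(const Entry& entry) {
  // Case #1 above: copy the interesting state to locals so it is captured in
  // the dump (by default only stack memory is saved).
  int last_error = entry.error_code;
  base::debug::Alias(&last_error);
  DEBUG_ALIAS_FOR_CSTR(name_copy, entry.name.c_str(), 16);

  // Because DumpWithoutCrashing() is declared NOT_TAIL_CALLED, ReportBadEntry()
  // itself stays on the recorded stack even though this is its final statement.
  base::debug::DumpWithoutCrashing();
}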
diff --git a/chromium/base/debug/elf_reader.cc b/chromium/base/debug/elf_reader.cc index aea4f3b0348..18238240c39 100644 --- a/chromium/base/debug/elf_reader.cc +++ b/chromium/base/debug/elf_reader.cc @@ -6,6 +6,7 @@ #include <arpa/inet.h> #include <elf.h> +#include <string.h> #include "base/bits.h" #include "base/containers/span.h" diff --git a/chromium/base/debug/invalid_access_win.cc b/chromium/base/debug/invalid_access_win.cc index 993044e22ac..48af993acdc 100644 --- a/chromium/base/debug/invalid_access_win.cc +++ b/chromium/base/debug/invalid_access_win.cc @@ -7,7 +7,7 @@ #include <stdlib.h> #include <windows.h> -#include "base/logging.h" +#include "base/check.h" #include "base/win/windows_version.h" namespace base { diff --git a/chromium/base/debug/proc_maps_linux.cc b/chromium/base/debug/proc_maps_linux.cc index 0bb44b45ac3..e38a5f952cf 100644 --- a/chromium/base/debug/proc_maps_linux.cc +++ b/chromium/base/debug/proc_maps_linux.cc @@ -9,6 +9,7 @@ #include "base/files/file_util.h" #include "base/files/scoped_file.h" +#include "base/logging.h" #include "base/strings/string_split.h" #include "build/build_config.h" diff --git a/chromium/base/debug/stack_trace.cc b/chromium/base/debug/stack_trace.cc index d8ca822d966..e437c9b00ff 100644 --- a/chromium/base/debug/stack_trace.cc +++ b/chromium/base/debug/stack_trace.cc @@ -9,7 +9,7 @@ #include <algorithm> #include <sstream> -#include "base/logging.h" +#include "base/check_op.h" #include "base/stl_util.h" #if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS) diff --git a/chromium/base/deferred_sequenced_task_runner.cc b/chromium/base/deferred_sequenced_task_runner.cc index 18e81ff19e6..0495b12a353 100644 --- a/chromium/base/deferred_sequenced_task_runner.cc +++ b/chromium/base/deferred_sequenced_task_runner.cc @@ -7,7 +7,7 @@ #include <utility> #include "base/bind.h" -#include "base/logging.h" +#include "base/check.h" namespace base { diff --git a/chromium/base/file_version_info_mac.mm b/chromium/base/file_version_info_mac.mm index 0b50a756c98..b463e8d729c 100644 --- a/chromium/base/file_version_info_mac.mm +++ b/chromium/base/file_version_info_mac.mm @@ -7,7 +7,6 @@ #import <Foundation/Foundation.h> #include "base/files/file_path.h" -#include "base/logging.h" #include "base/mac/bundle_locations.h" #include "base/mac/foundation_util.h" #include "base/strings/sys_string_conversions.h" diff --git a/chromium/base/file_version_info_win.cc b/chromium/base/file_version_info_win.cc index 00a53e76457..242cb01d7c6 100644 --- a/chromium/base/file_version_info_win.cc +++ b/chromium/base/file_version_info_win.cc @@ -9,8 +9,8 @@ #include <utility> +#include "base/check.h" #include "base/files/file_path.h" -#include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/stl_util.h" #include "base/strings/string_util.h" diff --git a/chromium/base/files/dir_reader_linux.h b/chromium/base/files/dir_reader_linux.h index f12deeb2ff1..e804b0b498d 100644 --- a/chromium/base/files/dir_reader_linux.h +++ b/chromium/base/files/dir_reader_linux.h @@ -9,6 +9,7 @@ #include <fcntl.h> #include <stddef.h> #include <stdint.h> +#include <string.h> #include <sys/syscall.h> #include <unistd.h> diff --git a/chromium/base/files/dir_reader_posix_unittest.cc b/chromium/base/files/dir_reader_posix_unittest.cc index 1954cb2f08f..72d61529d6f 100644 --- a/chromium/base/files/dir_reader_posix_unittest.cc +++ b/chromium/base/files/dir_reader_posix_unittest.cc @@ -11,8 +11,8 @@ #include <string.h> #include <unistd.h> +#include "base/check.h" #include "base/files/scoped_temp_dir.h" 
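// The include churn in hunks like the one above follows a consistent rule:
// files that only need assertion macros include the dedicated headers rather
// than all of base/logging.h. A minimal sketch of the resulting include
// choices (Frobnicate() is a hypothetical function):
//
//   #include "base/check.h"       // CHECK(), DCHECK().
//   #include "base/check_op.h"    // CHECK_EQ(), DCHECK_GE(), other comparisons.
//   #include "base/notreached.h"  // NOTREACHED().
//   #include "base/logging.h"     // Still needed for LOG(), DLOG(), VLOG().
//
//   void Frobnicate(int* p) {
//     DCHECK(p);
//     DCHECK_GE(*p, 0);
//     if (*p < 0) {
//       NOTREACHED();  // The DCHECK_GE above should make this unreachable.
//       return;
//     }
//     LOG(INFO) << "value: " << *p;
//   }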
-#include "base/logging.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/files/file.cc b/chromium/base/files/file.cc index fd251d15bed..22057eddefc 100644 --- a/chromium/base/files/file.cc +++ b/chromium/base/files/file.cc @@ -6,9 +6,11 @@ #include <utility> +#include "base/check_op.h" #include "base/files/file_path.h" #include "base/files/file_tracing.h" #include "base/metrics/histogram.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/timer/elapsed_timer.h" #include "build/build_config.h" diff --git a/chromium/base/files/file_enumerator_win.cc b/chromium/base/files/file_enumerator_win.cc index a11e83bd719..0185c9f3ac8 100644 --- a/chromium/base/files/file_enumerator_win.cc +++ b/chromium/base/files/file_enumerator_win.cc @@ -7,7 +7,8 @@ #include <stdint.h> #include <string.h> -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "base/strings/string_util.h" #include "base/threading/scoped_blocking_call.h" #include "base/win/shlwapi.h" diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc index 65459e00457..f1a4b0a9bf6 100644 --- a/chromium/base/files/file_path.cc +++ b/chromium/base/files/file_path.cc @@ -7,7 +7,7 @@ #include <string.h> #include <algorithm> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/pickle.h" #include "base/strings/strcat.h" diff --git a/chromium/base/files/file_path_watcher.cc b/chromium/base/files/file_path_watcher.cc index af403468585..17527e25c5f 100644 --- a/chromium/base/files/file_path_watcher.cc +++ b/chromium/base/files/file_path_watcher.cc @@ -7,7 +7,7 @@ #include "base/files/file_path_watcher.h" -#include "base/logging.h" +#include "base/check.h" #include "build/build_config.h" namespace base { diff --git a/chromium/base/files/file_path_watcher_fsevents.cc b/chromium/base/files/file_path_watcher_fsevents.cc index 7ac9f751f7e..5b7132e0a33 100644 --- a/chromium/base/files/file_path_watcher_fsevents.cc +++ b/chromium/base/files/file_path_watcher_fsevents.cc @@ -9,9 +9,9 @@ #include <list> #include "base/bind.h" +#include "base/check.h" #include "base/files/file_util.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/mac/scoped_cftyperef.h" #include "base/stl_util.h" #include "base/strings/stringprintf.h" diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc index 75fae8f579f..ccefc27666b 100644 --- a/chromium/base/files/file_path_watcher_unittest.cc +++ b/chromium/base/files/file_path_watcher_unittest.cc @@ -176,12 +176,6 @@ class FilePathWatcherTest : public testing::Test { return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest.lnk"); } - // Write |content| to |file|. Returns true on success. 
- bool WriteFile(const FilePath& file, const std::string& content) { - int write_size = ::base::WriteFile(file, content.c_str(), content.length()); - return write_size == static_cast<int>(content.length()); - } - bool SetupWatch(const FilePath& target, FilePathWatcher* watcher, TestDelegateBase* delegate, diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc index b925602649d..7780af7cd30 100644 --- a/chromium/base/files/file_posix.cc +++ b/chromium/base/files/file_posix.cc @@ -9,8 +9,9 @@ #include <stdint.h> #include <unistd.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/metrics/histogram_functions.h" +#include "base/notreached.h" #include "base/posix/eintr_wrapper.h" #include "base/strings/utf_string_conversions.h" #include "base/threading/scoped_blocking_call.h" diff --git a/chromium/base/files/file_util.cc b/chromium/base/files/file_util.cc index 0ae3784070c..546934b7084 100644 --- a/chromium/base/files/file_util.cc +++ b/chromium/base/files/file_util.cc @@ -13,9 +13,10 @@ #include <limits> #include <memory> +#include "base/check_op.h" #include "base/files/file_enumerator.h" #include "base/files/file_path.h" -#include "base/logging.h" +#include "base/posix/eintr_wrapper.h" #include "base/strings/string_piece.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" @@ -125,30 +126,48 @@ bool TextContentsEqual(const FilePath& filename1, const FilePath& filename2) { } #endif // !defined(OS_NACL_NONSFI) -bool ReadFileToStringWithMaxSize(const FilePath& path, - std::string* contents, - size_t max_size) { +bool ReadStreamToString(FILE* stream, std::string* contents) { + return ReadStreamToStringWithMaxSize( + stream, std::numeric_limits<size_t>::max(), contents); +} + +bool ReadStreamToStringWithMaxSize(FILE* stream, + size_t max_size, + std::string* contents) { if (contents) contents->clear(); - if (path.ReferencesParent()) - return false; - FILE* file = OpenFile(path, "rb"); - if (!file) { - return false; - } - // Many files supplied in |path| have incorrect size (proc files etc). - // Hence, the file is read sequentially as opposed to a one-shot read, using - // file size as a hint for chunk size if available. + // Seeking to the beginning is best-effort -- it is expected to fail for + // certain non-file stream (e.g., pipes). + HANDLE_EINTR(fseek(stream, 0, SEEK_SET)); + + // Many files have incorrect size (proc files etc). Hence, the file is read + // sequentially as opposed to a one-shot read, using file size as a hint for + // chunk size if available. constexpr int64_t kDefaultChunkSize = 1 << 16; - int64_t chunk_size; + int64_t chunk_size = kDefaultChunkSize - 1; #if !defined(OS_NACL_NONSFI) - if (!GetFileSize(path, &chunk_size) || chunk_size <= 0) - chunk_size = kDefaultChunkSize - 1; + ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); +#if defined(OS_WIN) + BY_HANDLE_FILE_INFORMATION file_info = {}; + if (::GetFileInformationByHandle( + reinterpret_cast<HANDLE>(_get_osfhandle(_fileno(stream))), + &file_info)) { + LARGE_INTEGER size; + size.HighPart = file_info.nFileSizeHigh; + size.LowPart = file_info.nFileSizeLow; + if (size.QuadPart > 0) + chunk_size = size.QuadPart; + } +#else // defined(OS_WIN) + stat_wrapper_t file_info = {}; + if (!File::Fstat(fileno(stream), &file_info) && file_info.st_size > 0) + chunk_size = file_info.st_size; +#endif // defined(OS_WIN) // We need to attempt to read at EOF for feof flag to be set so here we // use |chunk_size| + 1. 
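// The chunked read above backs the new stream-based helpers declared later in
// this change, ReadStreamToString() and ReadStreamToStringWithMaxSize(). A
// minimal usage sketch, where |path| is an assumed base::FilePath and the
// 1 MiB cap is arbitrary:
//
//   std::string contents;
//   base::ScopedFILE stream(base::OpenFile(path, "rb"));
//   if (stream && base::ReadStreamToString(stream.get(), &contents)) {
//     // ... use |contents| ...
//   }
//   // Bounded variant; returns false if |stream| holds more than the cap.
//   base::ReadStreamToStringWithMaxSize(stream.get(), 1024 * 1024, &contents);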
chunk_size = std::min<uint64_t>(chunk_size, max_size) + 1; -#else +#else // !defined(OS_NACL_NONSFI) chunk_size = kDefaultChunkSize; #endif // !defined(OS_NACL_NONSFI) size_t bytes_read_this_pass; @@ -157,9 +176,8 @@ bool ReadFileToStringWithMaxSize(const FilePath& path, std::string local_contents; local_contents.resize(chunk_size); - ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); while ((bytes_read_this_pass = fread(&local_contents[bytes_read_so_far], 1, - chunk_size, file)) > 0) { + chunk_size, stream)) > 0) { if ((max_size - bytes_read_so_far) < bytes_read_this_pass) { // Read more than max_size bytes, bail out. bytes_read_so_far = max_size; @@ -174,12 +192,11 @@ bool ReadFileToStringWithMaxSize(const FilePath& path, bytes_read_so_far += bytes_read_this_pass; // Last fread syscall (after EOF) can be avoided via feof, which is just a // flag check. - if (feof(file)) + if (feof(stream)) break; local_contents.resize(bytes_read_so_far + chunk_size); } - read_status = read_status && !ferror(file); - CloseFile(file); + read_status = read_status && !ferror(stream); if (contents) { contents->swap(local_contents); contents->resize(bytes_read_so_far); @@ -193,6 +210,19 @@ bool ReadFileToString(const FilePath& path, std::string* contents) { std::numeric_limits<size_t>::max()); } +bool ReadFileToStringWithMaxSize(const FilePath& path, + std::string* contents, + size_t max_size) { + if (contents) + contents->clear(); + if (path.ReferencesParent()) + return false; + ScopedFILE file_stream(OpenFile(path, "rb")); + if (!file_stream) + return false; + return ReadStreamToStringWithMaxSize(file_stream.get(), max_size, contents); +} + #if !defined(OS_NACL_NONSFI) bool IsDirectoryEmpty(const FilePath& dir_path) { FileEnumerator files(dir_path, false, @@ -202,12 +232,17 @@ bool IsDirectoryEmpty(const FilePath& dir_path) { return false; } -FILE* CreateAndOpenTemporaryFile(FilePath* path) { +bool CreateTemporaryFile(FilePath* path) { + FilePath temp_dir; + return GetTempDir(&temp_dir) && CreateTemporaryFileInDir(temp_dir, path); +} + +ScopedFILE CreateAndOpenTemporaryStream(FilePath* path) { FilePath directory; if (!GetTempDir(&directory)) return nullptr; - return CreateAndOpenTemporaryFileInDir(directory, path); + return CreateAndOpenTemporaryStreamInDir(directory, path); } bool CreateDirectory(const FilePath& full_path) { @@ -270,6 +305,17 @@ bool TruncateFile(FILE* file) { return true; } +bool WriteFile(const FilePath& filename, span<const uint8_t> data) { + int size = checked_cast<int>(data.size()); + return WriteFile(filename, reinterpret_cast<const char*>(data.data()), + size) == size; +} + +bool WriteFile(const FilePath& filename, StringPiece data) { + int size = checked_cast<int>(data.size()); + return WriteFile(filename, data.data(), size) == size; +} + int GetUniquePathNumber(const FilePath& path) { DCHECK(!path.empty()); if (!PathExists(path)) diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h index 9c3e06b888f..76bd98767f5 100644 --- a/chromium/base/files/file_util.h +++ b/chromium/base/files/file_util.h @@ -23,8 +23,10 @@ #endif #include "base/base_export.h" +#include "base/containers/span.h" #include "base/files/file.h" #include "base/files/file_path.h" +#include "base/files/scoped_file.h" #include "base/strings/string16.h" #include "build/build_config.h" @@ -194,6 +196,16 @@ BASE_EXPORT bool ReadFileToStringWithMaxSize(const FilePath& path, std::string* contents, size_t max_size); +// As ReadFileToString, but reading from an open stream 
after seeking to its +// start (if supported by the stream). +BASE_EXPORT bool ReadStreamToString(FILE* stream, std::string* contents); + +// As ReadFileToStringWithMaxSize, but reading from an open stream after seeking +// to its start (if supported by the stream). +BASE_EXPORT bool ReadStreamToStringWithMaxSize(FILE* stream, + size_t max_size, + std::string* contents); + #if defined(OS_POSIX) || defined(OS_FUCHSIA) // Read exactly |bytes| bytes from file descriptor |fd|, storing the result @@ -201,8 +213,9 @@ BASE_EXPORT bool ReadFileToStringWithMaxSize(const FilePath& path, // Returns true iff |bytes| bytes have been successfully read from |fd|. BASE_EXPORT bool ReadFromFD(int fd, char* buffer, size_t bytes); -// Performs the same function as CreateAndOpenTemporaryFileInDir(), but returns -// the file-descriptor wrapped in a ScopedFD, rather than wrapped in a FILE. +// Performs the same function as CreateAndOpenTemporaryStreamInDir(), but +// returns the file-descriptor wrapped in a ScopedFD, rather than the stream +// wrapped in a ScopedFILE. BASE_EXPORT ScopedFD CreateAndOpenFdForTemporaryFileInDir(const FilePath& dir, FilePath* path); @@ -280,6 +293,13 @@ BASE_EXPORT bool GetTempDir(FilePath* path); // Path service may also override DIR_HOME. BASE_EXPORT FilePath GetHomeDir(); +// Returns a new temporary file in |dir| with a unique name. The file is opened +// for exclusive read, write, and delete access (note: exclusivity is unique to +// Windows). On Windows, the returned file supports File::DeleteOnClose. +// On success, |temp_file| is populated with the full path to the created file. +BASE_EXPORT File CreateAndOpenTemporaryFileInDir(const FilePath& dir, + FilePath* temp_file); + // Creates a temporary file. The full path is placed in |path|, and the // function returns true if was successful in creating the file. The file will // be empty and all handles closed after this function returns. @@ -289,14 +309,14 @@ BASE_EXPORT bool CreateTemporaryFile(FilePath* path); BASE_EXPORT bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file); -// Create and open a temporary file. File is opened for read/write. -// The full path is placed in |path|. -// Returns a handle to the opened file or NULL if an error occurred. -BASE_EXPORT FILE* CreateAndOpenTemporaryFile(FilePath* path); +// Create and open a temporary file stream for exclusive read, write, and delete +// access (note: exclusivity is unique to Windows). The full path is placed in +// |path|. Returns the opened file stream, or null in case of error. +BASE_EXPORT ScopedFILE CreateAndOpenTemporaryStream(FilePath* path); -// Similar to CreateAndOpenTemporaryFile, but the file is created in |dir|. -BASE_EXPORT FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, - FilePath* path); +// Similar to CreateAndOpenTemporaryStream, but the file is created in |dir|. +BASE_EXPORT ScopedFILE CreateAndOpenTemporaryStreamInDir(const FilePath& dir, + FilePath* path); // Create a new directory. If prefix is provided, the new directory name is in // the format of prefixyyyy. @@ -380,6 +400,9 @@ BASE_EXPORT bool CloseFile(FILE* file); // functions take ownership of the existing File. BASE_EXPORT FILE* FileToFILE(File file, const char* mode); +// Returns a new handle to the file underlying |file_stream|. +BASE_EXPORT File FILEToFile(FILE* file_stream); + // Truncates an open file to end at the location of the current file pointer. // This is a cross-platform analog to Windows' SetEndOfFile() function. 
BASE_EXPORT bool TruncateFile(FILE* file); @@ -391,9 +414,19 @@ BASE_EXPORT int ReadFile(const FilePath& filename, char* data, int max_size); // Writes the given buffer into the file, overwriting any data that was // previously there. Returns the number of bytes written, or -1 on error. // If file doesn't exist, it gets created with read/write permissions for all. +// Note that the other variants of WriteFile() below may be easier to use. BASE_EXPORT int WriteFile(const FilePath& filename, const char* data, int size); +// Writes |data| into the file, overwriting any data that was previously there. +// Returns true if and only if all of |data| was written. If the file does not +// exist, it gets created with read/write permissions for all. +BASE_EXPORT bool WriteFile(const FilePath& filename, span<const uint8_t> data); + +// Another WriteFile() variant that takes a StringPiece so callers don't have to +// do manual conversions from a char span to a uint8_t span. +BASE_EXPORT bool WriteFile(const FilePath& filename, StringPiece data); + #if defined(OS_POSIX) || defined(OS_FUCHSIA) // Appends |data| to |fd|. Does not close |fd| when done. Returns true iff // |size| bytes of |data| were written to |fd|. diff --git a/chromium/base/files/file_util_mac.mm b/chromium/base/files/file_util_mac.mm index e1feec26da4..e53732a22ad 100644 --- a/chromium/base/files/file_util_mac.mm +++ b/chromium/base/files/file_util_mac.mm @@ -9,8 +9,8 @@ #include <stdlib.h> #include <string.h> +#include "base/check_op.h" #include "base/files/file_path.h" -#include "base/logging.h" #include "base/mac/foundation_util.h" #include "base/strings/string_util.h" #include "base/threading/scoped_blocking_call.h" diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc index bf91b86821d..91514251d9c 100644 --- a/chromium/base/files/file_util_posix.cc +++ b/chromium/base/files/file_util_posix.cc @@ -636,17 +636,21 @@ FilePath GetHomeDir() { } #endif // !defined(OS_MACOSX) -bool CreateTemporaryFile(FilePath* path) { +File CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { // For call to close() inside ScopedFD. ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); - FilePath directory; - if (!GetTempDir(&directory)) - return false; - ScopedFD fd = CreateAndOpenFdForTemporaryFileInDir(directory, path); + return File(CreateAndOpenFdForTemporaryFileInDir(dir, temp_file)); +} + +bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { + // For call to close() inside ScopedFD. + ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); + ScopedFD fd = CreateAndOpenFdForTemporaryFileInDir(dir, temp_file); return fd.is_valid(); } -FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) { +ScopedFILE CreateAndOpenTemporaryStreamInDir(const FilePath& dir, + FilePath* path) { ScopedFD scoped_fd = CreateAndOpenFdForTemporaryFileInDir(dir, path); if (!scoped_fd.is_valid()) return nullptr; @@ -655,14 +659,7 @@ FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) { FILE* file = fdopen(fd, "a+"); if (!file) close(fd); - return file; -} - -bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { - // For call to close() inside ScopedFD. 
- ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); - ScopedFD fd = CreateAndOpenFdForTemporaryFileInDir(dir, temp_file); - return fd.is_valid(); + return ScopedFILE(file); } static bool CreateTemporaryDirInDirImpl(const FilePath& base_dir, @@ -820,6 +817,18 @@ FILE* FileToFILE(File file, const char* mode) { file.TakePlatformFile(); return stream; } + +File FILEToFile(FILE* file_stream) { + if (!file_stream) + return File(); + + PlatformFile fd = fileno(file_stream); + DCHECK_NE(fd, -1); + ScopedPlatformFile other_fd(HANDLE_EINTR(dup(fd))); + if (!other_fd.is_valid()) + return File(File::GetLastFileError()); + return File(std::move(other_fd)); +} #endif // !defined(OS_NACL) int ReadFile(const FilePath& filename, char* data, int max_size) { diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc index 87086b2c05e..2ade5709855 100644 --- a/chromium/base/files/file_util_unittest.cc +++ b/chromium/base/files/file_util_unittest.cc @@ -918,7 +918,7 @@ TEST_F(FileUtilTest, ChangeFilePermissionsAndRead) { char buffer[kDataSize]; // Write file. - EXPECT_EQ(kDataSize, WriteFile(file_name, kData, kDataSize)); + EXPECT_TRUE(WriteFile(file_name, kData)); EXPECT_TRUE(PathExists(file_name)); // Make sure the file is readable. @@ -954,8 +954,7 @@ TEST_F(FileUtilTest, ChangeFilePermissionsAndWrite) { const std::string kData("hello"); // Write file. - EXPECT_EQ(static_cast<int>(kData.length()), - WriteFile(file_name, kData.data(), kData.length())); + EXPECT_TRUE(WriteFile(file_name, kData)); EXPECT_TRUE(PathExists(file_name)); // Make sure the file is writable. @@ -969,7 +968,7 @@ TEST_F(FileUtilTest, ChangeFilePermissionsAndWrite) { EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode)); EXPECT_FALSE(mode & FILE_PERMISSION_WRITE_BY_USER); // Make sure the file can't be write. - EXPECT_EQ(-1, WriteFile(file_name, kData.data(), kData.length())); + EXPECT_FALSE(WriteFile(file_name, kData)); EXPECT_FALSE(PathIsWritable(file_name)); // Give read permission. @@ -978,8 +977,7 @@ TEST_F(FileUtilTest, ChangeFilePermissionsAndWrite) { EXPECT_TRUE(GetPosixFilePermissions(file_name, &mode)); EXPECT_TRUE(mode & FILE_PERMISSION_WRITE_BY_USER); // Make sure the file can be write. - EXPECT_EQ(static_cast<int>(kData.length()), - WriteFile(file_name, kData.data(), kData.length())); + EXPECT_TRUE(WriteFile(file_name, kData)); EXPECT_TRUE(PathIsWritable(file_name)); // Delete the file. @@ -997,8 +995,7 @@ TEST_F(FileUtilTest, ChangeDirectoryPermissionsAndEnumerate) { FilePath file_name = subdir_path.Append(FPL("Test Readable File.txt")); EXPECT_FALSE(PathExists(file_name)); const std::string kData("hello"); - EXPECT_EQ(static_cast<int>(kData.length()), - WriteFile(file_name, kData.data(), kData.length())); + EXPECT_TRUE(WriteFile(file_name, kData)); EXPECT_TRUE(PathExists(file_name)); // Make sure the directory has the all permissions. @@ -1057,11 +1054,9 @@ TEST_F(FileUtilTest, ExecutableExistsInPath) { // Write file. 
const std::string kData("hello"); - ASSERT_EQ(static_cast<int>(kData.length()), - WriteFile(kExePath, kData.data(), kData.length())); + ASSERT_TRUE(WriteFile(kExePath, kData)); ASSERT_TRUE(PathExists(kExePath)); - ASSERT_EQ(static_cast<int>(kData.length()), - WriteFile(kRegularFilePath, kData.data(), kData.length())); + ASSERT_TRUE(WriteFile(kRegularFilePath, kData)); ASSERT_TRUE(PathExists(kRegularFilePath)); ASSERT_TRUE(SetPosixFilePermissions(dir1.Append(kExeFileName), @@ -2542,6 +2537,24 @@ TEST_F(FileUtilTest, OpenFileNoInheritance) { } } +TEST_F(FileUtilTest, CreateAndOpenTemporaryFileInDir) { + // Create a temporary file. + FilePath path; + File file = CreateAndOpenTemporaryFileInDir(temp_dir_.GetPath(), &path); + ASSERT_TRUE(file.IsValid()); + EXPECT_FALSE(path.empty()); + + // Try to open another handle to it. + File file2(path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_SHARE_DELETE); +#if defined(OS_WIN) + // The file cannot be opened again on account of the exclusive access. + EXPECT_FALSE(file2.IsValid()); +#else + // Exclusive access isn't a thing on non-Windows platforms. + EXPECT_TRUE(file2.IsValid()); +#endif +} + TEST_F(FileUtilTest, CreateTemporaryFileTest) { FilePath temp_files[3]; for (auto& i : temp_files) { @@ -2555,14 +2568,14 @@ TEST_F(FileUtilTest, CreateTemporaryFileTest) { EXPECT_TRUE(DeleteFile(i, false)); } -TEST_F(FileUtilTest, CreateAndOpenTemporaryFileTest) { +TEST_F(FileUtilTest, CreateAndOpenTemporaryStreamTest) { FilePath names[3]; - FILE* fps[3]; + ScopedFILE fps[3]; int i; // Create; make sure they are open and exist. for (i = 0; i < 3; ++i) { - fps[i] = CreateAndOpenTemporaryFile(&(names[i])); + fps[i] = CreateAndOpenTemporaryStream(&(names[i])); ASSERT_TRUE(fps[i]); EXPECT_TRUE(PathExists(names[i])); } @@ -2574,7 +2587,7 @@ TEST_F(FileUtilTest, CreateAndOpenTemporaryFileTest) { // Close and delete. for (i = 0; i < 3; ++i) { - EXPECT_TRUE(CloseFile(fps[i])); + fps[i].reset(); EXPECT_TRUE(DeleteFile(names[i], false)); } } @@ -2636,6 +2649,19 @@ TEST_F(FileUtilTest, FileToFILE) { EXPECT_TRUE(CloseFile(stream)); } +TEST_F(FileUtilTest, FILEToFile) { + ScopedFILE stream; + EXPECT_FALSE(FILEToFile(stream.get()).IsValid()); + + stream.reset(OpenFile(temp_dir_.GetPath().Append(FPL("hello.txt")), "wb+")); + ASSERT_TRUE(stream); + File file = FILEToFile(stream.get()); + EXPECT_TRUE(file.IsValid()); + ASSERT_EQ(fprintf(stream.get(), "there"), 5); + ASSERT_EQ(fflush(stream.get()), 0); + EXPECT_EQ(file.GetLength(), 5L); +} + TEST_F(FileUtilTest, CreateNewTempDirectoryTest) { FilePath temp_dir; ASSERT_TRUE(CreateNewTempDirectory(FilePath::StringType(), &temp_dir)); @@ -2982,8 +3008,7 @@ TEST_F(FileUtilTest, AppendToFile) { std::string data("hello"); EXPECT_FALSE(AppendToFile(foobar, data.c_str(), data.size())); - EXPECT_EQ(static_cast<int>(data.length()), - WriteFile(foobar, data.c_str(), data.length())); + EXPECT_TRUE(WriteFile(foobar, data)); EXPECT_TRUE(AppendToFile(foobar, data.c_str(), data.size())); const std::wstring read_content = ReadTextFile(foobar); @@ -2996,8 +3021,7 @@ TEST_F(FileUtilTest, ReadFile) { FilePath file_path = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("ReadFileTest")); - ASSERT_EQ(static_cast<int>(kTestData.size()), - WriteFile(file_path, kTestData.data(), kTestData.size())); + ASSERT_TRUE(WriteFile(file_path, kTestData)); // Make buffers with various size. 
std::vector<char> small_buffer(kTestData.size() / 2); @@ -3047,8 +3071,7 @@ TEST_F(FileUtilTest, ReadFileToString) { .Append(FILE_PATH_LITERAL("ReadFileToStringTest")); // Create test file. - ASSERT_EQ(static_cast<int>(strlen(kTestData)), - WriteFile(file_path, kTestData, strlen(kTestData))); + ASSERT_TRUE(WriteFile(file_path, kTestData)); EXPECT_TRUE(ReadFileToString(file_path, &data)); EXPECT_EQ(kTestData, data); @@ -3285,7 +3308,11 @@ MULTIPROCESS_TEST_MAIN(ChildMain) { PIPE_WAIT, 1, 0, 0, 0, NULL); EXPECT_NE(ph, INVALID_HANDLE_VALUE); EXPECT_TRUE(SetEvent(sync_event.Get())); - EXPECT_TRUE(ConnectNamedPipe(ph, NULL)); + if (!::ConnectNamedPipe(ph, /*lpOverlapped=*/nullptr)) { + // ERROR_PIPE_CONNECTED means that the other side has already connected. + auto error = ::GetLastError(); + EXPECT_EQ(error, DWORD{ERROR_PIPE_CONNECTED}); + } DWORD written; EXPECT_TRUE(::WriteFile(ph, kTestData, strlen(kTestData), &written, NULL)); @@ -3312,7 +3339,11 @@ MULTIPROCESS_TEST_MAIN(MoreThanBufferSizeChildMain) { PIPE_WAIT, 1, data.size(), data.size(), 0, NULL); EXPECT_NE(ph, INVALID_HANDLE_VALUE); EXPECT_TRUE(SetEvent(sync_event.Get())); - EXPECT_TRUE(ConnectNamedPipe(ph, NULL)); + if (!::ConnectNamedPipe(ph, /*lpOverlapped=*/nullptr)) { + // ERROR_PIPE_CONNECTED means that the other side has already connected. + auto error = ::GetLastError(); + EXPECT_EQ(error, DWORD{ERROR_PIPE_CONNECTED}); + } DWORD written; EXPECT_TRUE(::WriteFile(ph, data.c_str(), data.size(), &written, NULL)); @@ -3461,8 +3492,7 @@ TEST_F(FileUtilTest, ReadFileToStringWithLargeFile) { temp_dir_.GetPath().Append(FILE_PATH_LITERAL("ReadFileToStringTest")); // Create test file. - ASSERT_EQ(static_cast<int>(kLargeFileSize), - WriteFile(file_path, data.c_str(), kLargeFileSize)); + ASSERT_TRUE(WriteFile(file_path, data)); std::string actual_data = "temp"; EXPECT_TRUE(ReadFileToString(file_path, &actual_data)); @@ -3479,6 +3509,33 @@ TEST_F(FileUtilTest, ReadFileToStringWithLargeFile) { EXPECT_EQ(std::string(kLargeFileSize - 1, 'c'), actual_data); } +TEST_F(FileUtilTest, ReadStreamToString) { + ScopedFILE stream( + OpenFile(temp_dir_.GetPath().Append(FPL("hello.txt")), "wb+")); + ASSERT_TRUE(stream); + File file = FILEToFile(stream.get()); + ASSERT_TRUE(file.IsValid()); + ASSERT_EQ(fprintf(stream.get(), "there"), 5); + ASSERT_EQ(fflush(stream.get()), 0); + + std::string contents; + EXPECT_TRUE(ReadStreamToString(stream.get(), &contents)); + EXPECT_EQ(contents, std::string("there")); +} + +TEST_F(FileUtilTest, ReadStreamToStringWithMaxSize) { + ScopedFILE stream( + OpenFile(temp_dir_.GetPath().Append(FPL("hello.txt")), "wb+")); + ASSERT_TRUE(stream); + File file = FILEToFile(stream.get()); + ASSERT_TRUE(file.IsValid()); + ASSERT_EQ(fprintf(stream.get(), "there"), 5); + ASSERT_EQ(fflush(stream.get()), 0); + + std::string contents; + EXPECT_FALSE(ReadStreamToStringWithMaxSize(stream.get(), 2, &contents)); +} + TEST_F(FileUtilTest, TouchFile) { FilePath data_dir = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("FilePathTest")); @@ -3491,8 +3548,7 @@ TEST_F(FileUtilTest, TouchFile) { FilePath foobar(data_dir.Append(FILE_PATH_LITERAL("foobar.txt"))); std::string data("hello"); - ASSERT_EQ(static_cast<int>(data.length()), - WriteFile(foobar, data.c_str(), data.length())); + ASSERT_TRUE(WriteFile(foobar, data)); Time access_time; // This timestamp is divisible by one day (in local timezone), @@ -3518,6 +3574,51 @@ TEST_F(FileUtilTest, TouchFile) { file_info.last_modified.ToInternalValue()); } +TEST_F(FileUtilTest, WriteFileSpanVariant) { + 
FilePath empty_file = + temp_dir_.GetPath().Append(FILE_PATH_LITERAL("empty_file")); + ASSERT_FALSE(PathExists(empty_file)); + EXPECT_TRUE(WriteFile(empty_file, base::span<const uint8_t>())); + EXPECT_TRUE(PathExists(empty_file)); + + std::string data = "not empty"; + EXPECT_TRUE(ReadFileToString(empty_file, &data)); + EXPECT_TRUE(data.empty()); + + FilePath write_span_file = + temp_dir_.GetPath().Append(FILE_PATH_LITERAL("write_span_file")); + ASSERT_FALSE(PathExists(write_span_file)); + static constexpr uint8_t kInput[] = {'h', 'e', 'l', 'l', 'o'}; + EXPECT_TRUE(WriteFile(write_span_file, kInput)); + EXPECT_TRUE(PathExists(write_span_file)); + + data.clear(); + EXPECT_TRUE(ReadFileToString(write_span_file, &data)); + EXPECT_EQ("hello", data); +} + +TEST_F(FileUtilTest, WriteFileStringVariant) { + FilePath empty_file = + temp_dir_.GetPath().Append(FILE_PATH_LITERAL("empty_file")); + ASSERT_FALSE(PathExists(empty_file)); + EXPECT_TRUE(WriteFile(empty_file, "")); + EXPECT_TRUE(PathExists(empty_file)); + + std::string data = "not empty"; + EXPECT_TRUE(ReadFileToString(empty_file, &data)); + EXPECT_TRUE(data.empty()); + + FilePath write_span_file = + temp_dir_.GetPath().Append(FILE_PATH_LITERAL("write_string_file")); + ASSERT_FALSE(PathExists(write_span_file)); + EXPECT_TRUE(WriteFile(write_span_file, "world")); + EXPECT_TRUE(PathExists(write_span_file)); + + data.clear(); + EXPECT_TRUE(ReadFileToString(write_span_file, &data)); + EXPECT_EQ("world", data); +} + TEST_F(FileUtilTest, IsDirectoryEmpty) { FilePath empty_dir = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("EmptyDir")); @@ -3530,8 +3631,7 @@ TEST_F(FileUtilTest, IsDirectoryEmpty) { FilePath foo(empty_dir.Append(FILE_PATH_LITERAL("foo.txt"))); std::string bar("baz"); - ASSERT_EQ(static_cast<int>(bar.length()), - WriteFile(foo, bar.c_str(), bar.length())); + ASSERT_TRUE(WriteFile(foo, bar)); EXPECT_FALSE(IsDirectoryEmpty(empty_dir)); } @@ -4029,7 +4129,7 @@ TEST(FileUtilMultiThreadedTest, MAYBE_MultiThreadedTempFiles) { const RepeatingClosure open_write_close_read = BindRepeating([]() { FilePath output_filename; - ScopedFILE output_file(CreateAndOpenTemporaryFile(&output_filename)); + ScopedFILE output_file(CreateAndOpenTemporaryStream(&output_filename)); EXPECT_TRUE(output_file); const std::string content = GenerateGUID(); diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc index a39fc4c8709..e0ac2598455 100644 --- a/chromium/base/files/file_util_win.cc +++ b/chromium/base/files/file_util_win.cc @@ -404,11 +404,6 @@ bool ReplaceFile(const FilePath& from_path, const FilePath& to_path, File::Error* error) { ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); - // Try a simple move first. It will only succeed when |to_path| doesn't - // already exist. - if (::MoveFile(from_path.value().c_str(), to_path.value().c_str())) - return true; - File::Error move_error = File::OSErrorToFileError(GetLastError()); // Alias paths for investigation of shutdown hangs. crbug.com/1054164 FilePath::CharType from_path_str[MAX_PATH]; @@ -419,21 +414,29 @@ bool ReplaceFile(const FilePath& from_path, base::wcslcpy(to_path_str, to_path.value().c_str(), base::size(to_path_str)); base::debug::Alias(to_path_str); - // Try the full-blown replace if the move fails, as ReplaceFile will only - // succeed when |to_path| does exist. When writing to a network share, we - // may not be able to change the ACLs. Ignore ACL errors then - // (REPLACEFILE_IGNORE_MERGE_ERRORS). 
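// For context, base::ReplaceFile() is the swap step of the usual
// write-temp-then-swap save pattern. A minimal caller-side sketch, where
// |target_path| (a base::FilePath) and |new_contents| (a base::StringPiece)
// are illustrative stand-ins:
//
//   base::FilePath temp_path;
//   if (base::CreateTemporaryFileInDir(target_path.DirName(), &temp_path) &&
//       base::WriteFile(temp_path, new_contents)) {
//     base::File::Error error = base::File::FILE_OK;
//     if (!base::ReplaceFile(temp_path, target_path, &error)) {
//       DLOG(WARNING) << "ReplaceFile failed: "
//                     << base::File::ErrorToString(error);
//     }
//   }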
+ // Assume that |to_path| already exists and try the normal replace. This will + // fail with ERROR_FILE_NOT_FOUND if |to_path| does not exist. When writing to + // a network share, we may not be able to change the ACLs. Ignore ACL errors + // then (REPLACEFILE_IGNORE_MERGE_ERRORS). if (::ReplaceFile(to_path.value().c_str(), from_path.value().c_str(), NULL, REPLACEFILE_IGNORE_MERGE_ERRORS, NULL, NULL)) { return true; } + + File::Error replace_error = File::OSErrorToFileError(GetLastError()); + + // Try a simple move next. It will only succeed when |to_path| doesn't already + // exist. + if (::MoveFile(from_path.value().c_str(), to_path.value().c_str())) + return true; + // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely that // |to_path| does not exist. In this case, the more relevant error comes // from the call to MoveFile. if (error) { - File::Error replace_error = File::OSErrorToFileError(GetLastError()); - *error = replace_error == File::FILE_ERROR_NOT_FOUND ? move_error - : replace_error; + *error = replace_error == File::FILE_ERROR_NOT_FOUND + ? File::GetLastFileError() + : replace_error; } return false; } @@ -505,39 +508,15 @@ FilePath GetHomeDir() { return FilePath(FILE_PATH_LITERAL("C:\\")); } -bool CreateTemporaryFile(FilePath* path) { - ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); - - FilePath temp_file; - - if (!GetTempDir(path)) - return false; - - if (CreateTemporaryFileInDir(*path, &temp_file)) { - *path = temp_file; - return true; - } - - return false; -} - -// On POSIX we have semantics to create and open a temporary file -// atomically. -// TODO(jrg): is there equivalent call to use on Windows instead of -// going 2-step? -FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) { +File CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); - if (!CreateTemporaryFileInDir(dir, path)) { - return NULL; - } - // Open file in binary mode, to avoid problems with fwrite. On Windows - // it replaces \n's with \r\n's, which may surprise you. - // Reference: http://msdn.microsoft.com/en-us/library/h9t88zwz(VS.71).aspx - return OpenFile(*path, "wb+"); -} -bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { - ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); + // Open the file with exclusive r/w/d access, and allow the caller to decide + // to mark it for deletion upon close after the fact. + constexpr uint32_t kFlags = File::FLAG_CREATE | File::FLAG_READ | + File::FLAG_WRITE | File::FLAG_EXCLUSIVE_READ | + File::FLAG_EXCLUSIVE_WRITE | + File::FLAG_CAN_DELETE_ON_CLOSE; // Use GUID instead of ::GetTempFileName() to generate unique file names. // "Due to the algorithm used to generate file names, GetTempFileName can @@ -547,39 +526,48 @@ bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { // https://msdn.microsoft.com/library/windows/desktop/aa364991.aspx FilePath temp_name; - bool create_file_success = false; + File file; // Although it is nearly impossible to get a duplicate name with GUID, we // still use a loop here in case it happens. 
for (int i = 0; i < 100; ++i) { temp_name = dir.Append(UTF8ToWide(GenerateGUID()) + FILE_PATH_LITERAL(".tmp")); - File file(temp_name, - File::FLAG_CREATE | File::FLAG_READ | File::FLAG_WRITE); - if (file.IsValid()) { - file.Close(); - create_file_success = true; + file.Initialize(temp_name, kFlags); + if (file.IsValid()) break; - } } - if (!create_file_success) { + if (!file.IsValid()) { DPLOG(WARNING) << "Failed to get temporary file name in " << dir.value(); - return false; + return file; } wchar_t long_temp_name[MAX_PATH + 1]; - DWORD long_name_len = + const DWORD long_name_len = GetLongPathName(temp_name.value().c_str(), long_temp_name, MAX_PATH); - if (long_name_len > MAX_PATH || long_name_len == 0) { + if (long_name_len != 0 && long_name_len <= MAX_PATH) { + *temp_file = + FilePath(FilePath::StringPieceType(long_temp_name, long_name_len)); + } else { // GetLongPathName() failed, but we still have a temporary file. *temp_file = std::move(temp_name); - return true; } - FilePath::StringPieceType long_temp_name_str(long_temp_name, long_name_len); - *temp_file = FilePath(long_temp_name_str); - return true; + return file; +} + +bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) { + return CreateAndOpenTemporaryFileInDir(dir, temp_file).IsValid(); +} + +ScopedFILE CreateAndOpenTemporaryStreamInDir(const FilePath& dir, + FilePath* path) { + // Open file in binary mode, to avoid problems with fwrite. On Windows + // it replaces \n's with \r\n's, which may surprise you. + // Reference: http://msdn.microsoft.com/en-us/library/h9t88zwz(VS.71).aspx + return ScopedFILE( + FileToFILE(CreateAndOpenTemporaryFileInDir(dir, path), "wb+")); } bool CreateTemporaryDirInDir(const FilePath& base_dir, @@ -629,8 +617,6 @@ bool CreateDirectoryAndGetError(const FilePath& full_path, const DWORD fileattr = ::GetFileAttributes(full_path_str); if (fileattr != INVALID_FILE_ATTRIBUTES) { if ((fileattr & FILE_ATTRIBUTE_DIRECTORY) != 0) { - DVLOG(1) << "CreateDirectory(" << full_path_str << "), " - << "directory already exists."; return true; } DLOG(WARNING) << "CreateDirectory(" << full_path_str << "), " @@ -814,6 +800,7 @@ FILE* OpenFile(const FilePath& filename, const char* mode) { } FILE* FileToFILE(File file, const char* mode) { + DCHECK(!file.async()); if (!file.IsValid()) return NULL; int fd = @@ -827,6 +814,29 @@ FILE* FileToFILE(File file, const char* mode) { return stream; } +File FILEToFile(FILE* file_stream) { + if (!file_stream) + return File(); + + int fd = _fileno(file_stream); + DCHECK_GE(fd, 0); + intptr_t file_handle = _get_osfhandle(fd); + DCHECK_NE(file_handle, reinterpret_cast<intptr_t>(INVALID_HANDLE_VALUE)); + + HANDLE other_handle = nullptr; + if (!::DuplicateHandle( + /*hSourceProcessHandle=*/GetCurrentProcess(), + reinterpret_cast<HANDLE>(file_handle), + /*hTargetProcessHandle=*/GetCurrentProcess(), &other_handle, + /*dwDesiredAccess=*/0, + /*bInheritHandle=*/FALSE, + /*dwOptions=*/DUPLICATE_SAME_ACCESS)) { + return File(File::GetLastFileError()); + } + + return File(ScopedPlatformFile(other_handle)); +} + int ReadFile(const FilePath& filename, char* data, int max_size) { ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK); win::ScopedHandle file(CreateFile(filename.value().c_str(), GENERIC_READ, diff --git a/chromium/base/files/file_win.cc b/chromium/base/files/file_win.cc index 86434ca3ed3..7a3f4cdefb0 100644 --- a/chromium/base/files/file_win.cc +++ b/chromium/base/files/file_win.cc @@ -7,8 +7,9 @@ #include <io.h> #include <stdint.h> -#include 
"base/logging.h" +#include "base/check_op.h" #include "base/metrics/histogram_functions.h" +#include "base/notreached.h" #include "base/strings/string_util.h" #include "base/threading/scoped_blocking_call.h" diff --git a/chromium/base/files/important_file_writer.cc b/chromium/base/files/important_file_writer.cc index 33a39274f18..79921df5c1f 100644 --- a/chromium/base/files/important_file_writer.cc +++ b/chromium/base/files/important_file_writer.cc @@ -7,6 +7,8 @@ #include <stddef.h> #include <stdint.h> #include <stdio.h> + +#include <algorithm> #include <string> #include <utility> @@ -17,15 +19,18 @@ #include "base/files/file.h" #include "base/files/file_path.h" #include "base/files/file_util.h" +#include "base/files/important_file_writer_cleaner.h" #include "base/logging.h" #include "base/metrics/histogram_functions.h" #include "base/metrics/histogram_macros.h" #include "base/numerics/safe_conversions.h" +#include "base/sequenced_task_runner.h" #include "base/stl_util.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_util.h" #include "base/task_runner.h" #include "base/task_runner_util.h" +#include "base/threading/sequenced_task_runner_handle.h" #include "base/threading/thread.h" #include "base/time/time.h" #include "build/build_config.h" @@ -36,17 +41,15 @@ namespace { constexpr auto kDefaultCommitInterval = TimeDelta::FromSeconds(10); -// This enum is used to define the buckets for an enumerated UMA histogram. -// Hence, -// (a) existing enumerated constants should never be deleted or reordered, and -// (b) new constants should only be appended at the end of the enumeration. +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum TempFileFailure { - FAILED_CREATING, - FAILED_OPENING, - FAILED_CLOSING, // Unused. - FAILED_WRITING, - FAILED_RENAMING, - FAILED_FLUSHING, + FAILED_CREATING = 0, + // FAILED_OPENING = 1, + // FAILED_CLOSING = 2, + FAILED_WRITING = 3, + FAILED_RENAMING = 4, + FAILED_FLUSHING = 5, TEMP_FILE_FAILURE_MAX }; @@ -81,9 +84,80 @@ void LogFailure(const FilePath& path, DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message; } -// Helper function to call WriteFileAtomically() with a -// std::unique_ptr<std::string>. -void WriteScopedStringToFileAtomically( +// Deletes the file named |tmp_file_path| (which may be open as |tmp_file|), +// retrying on the same sequence after some delay in case of error. It is sadly +// common that third-party software on Windows may open the temp file and map it +// into its own address space, which prevents others from marking it for +// deletion (even if opening it for deletion was possible). |histogram_suffix| +// is a (possibly empty) suffix for metrics. |attempt| is the number of failed +// previous attempts to the delete the file (defaults to 0). +void DeleteTmpFileWithRetry(File tmp_file, + const FilePath& tmp_file_path, + StringPiece histogram_suffix, + int attempt = 0) { +#if defined(OS_WIN) + // Mark the file for deletion when it is closed and then close it implicitly. + if (tmp_file.IsValid()) { + if (tmp_file.DeleteOnClose(true)) + return; + // The file was opened with exclusive r/w access, so it would be very odd + // for this to fail. + UmaHistogramExactLinearWithSuffix( + "ImportantFile.DeleteOnCloseError", histogram_suffix, + -File::GetLastFileError(), -File::FILE_ERROR_MAX); + // Go ahead and close the file. 
The call to DeleteFile below will basically + // repeat the above, but maybe it will somehow succeed. + tmp_file.Close(); + } +#endif + + // Retry every 250ms for up to two seconds. These values were pulled out of + // thin air, and may be adjusted in the future based on the metrics collected. + static constexpr int kMaxDeleteAttempts = 8; + static constexpr TimeDelta kDeleteFileRetryDelay = + TimeDelta::FromMilliseconds(250); + + if (!DeleteFile(tmp_file_path, /*recursive=*/false)) { + const auto last_file_error = File::GetLastFileError(); + if (++attempt >= kMaxDeleteAttempts) { + // All retries have been exhausted; record the final error. + UmaHistogramExactLinearWithSuffix( + "ImportantFile.FileDeleteRetryExceededError", histogram_suffix, + -last_file_error, -File::FILE_ERROR_MAX); + } else if (!SequencedTaskRunnerHandle::IsSet() || + !SequencedTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce(&DeleteTmpFileWithRetry, base::File(), + tmp_file_path, histogram_suffix, attempt), + kDeleteFileRetryDelay)) { + // Retries are not possible, so record the simple delete error. + UmaHistogramExactLinearWithSuffix("ImportantFile.FileDeleteNoRetryError", + histogram_suffix, -last_file_error, + -File::FILE_ERROR_MAX); + } + } else if (attempt) { + // Record the number of attempts to reach success only if more than one was + // needed. + UmaHistogramExactLinearWithSuffix( + "ImportantFile.FileDeleteRetrySuccessCount", histogram_suffix, attempt, + kMaxDeleteAttempts); + } +} + +} // namespace + +// static +bool ImportantFileWriter::WriteFileAtomically(const FilePath& path, + StringPiece data, + StringPiece histogram_suffix) { + // Calling the impl by way of the public WriteFileAtomically, so + // |from_instance| is false. + return WriteFileAtomicallyImpl(path, data, histogram_suffix, + /*from_instance=*/false); +} + +// static +void ImportantFileWriter::WriteScopedStringToFileAtomically( const FilePath& path, std::unique_ptr<std::string> data, OnceClosure before_write_callback, @@ -92,28 +166,26 @@ void WriteScopedStringToFileAtomically( if (!before_write_callback.is_null()) std::move(before_write_callback).Run(); - bool result = - ImportantFileWriter::WriteFileAtomically(path, *data, histogram_suffix); + // Calling the impl by way of the private WriteScopedStringToFileAtomically, + // which originated from an ImportantFileWriter instance, so |from_instance| + // is true. + const bool result = WriteFileAtomicallyImpl(path, *data, histogram_suffix, + /*from_instance=*/true); if (!after_write_callback.is_null()) std::move(after_write_callback).Run(result); } -void DeleteTmpFile(const FilePath& tmp_file_path, - StringPiece histogram_suffix) { - if (!DeleteFile(tmp_file_path, false)) { - UmaHistogramExactLinearWithSuffix( - "ImportantFile.FileDeleteError", histogram_suffix, - -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX); - } -} - -} // namespace - // static -bool ImportantFileWriter::WriteFileAtomically(const FilePath& path, - StringPiece data, - StringPiece histogram_suffix) { +bool ImportantFileWriter::WriteFileAtomicallyImpl(const FilePath& path, + StringPiece data, + StringPiece histogram_suffix, + bool from_instance) { +#if defined(OS_WIN) + if (!from_instance) + ImportantFileWriterCleaner::AddDirectory(path.DirName()); +#endif + #if defined(OS_WIN) && DCHECK_IS_ON() // In https://crbug.com/920174, we have cases where CreateTemporaryFileInDir // hits a DCHECK because creation fails with no indication why. 
Pull the path @@ -142,59 +214,60 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path, // as target file, so it can be moved in one step, and that the temp file // is securely created. FilePath tmp_file_path; - if (!CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) { - const auto last_file_error = base::File::GetLastFileError(); - UmaHistogramExactLinearWithSuffix("ImportantFile.FileCreateError", - histogram_suffix, -last_file_error, - -base::File::FILE_ERROR_MAX); - LogFailure(path, histogram_suffix, FAILED_CREATING, - "could not create temporary file"); - return false; - } - - File tmp_file(tmp_file_path, File::FLAG_OPEN | File::FLAG_WRITE); + File tmp_file = + CreateAndOpenTemporaryFileInDir(path.DirName(), &tmp_file_path); if (!tmp_file.IsValid()) { UmaHistogramExactLinearWithSuffix( - "ImportantFile.FileOpenError", histogram_suffix, - -tmp_file.error_details(), -base::File::FILE_ERROR_MAX); - LogFailure(path, histogram_suffix, FAILED_OPENING, - "could not open temporary file"); - DeleteFile(tmp_file_path, false); + "ImportantFile.FileCreateError", histogram_suffix, + -tmp_file.error_details(), -File::FILE_ERROR_MAX); + LogFailure(path, histogram_suffix, FAILED_CREATING, + "could not create temporary file"); return false; } - // If this fails in the wild, something really bad is going on. - const int data_length = checked_cast<int32_t>(data.length()); - int bytes_written = tmp_file.Write(0, data.data(), data_length); - if (bytes_written < data_length) { - UmaHistogramExactLinearWithSuffix( - "ImportantFile.FileWriteError", histogram_suffix, - -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX); + // Don't write all of the data at once because this can lead to kernel + // address-space exhaustion on 32-bit Windows (see https://crbug.com/1001022 + // for details). + constexpr ptrdiff_t kMaxWriteAmount = 8 * 1024 * 1024; + int bytes_written = 0; + for (const char *scan = data.data(), *const end = scan + data.length(); + scan < end; scan += bytes_written) { + const int write_amount = std::min(kMaxWriteAmount, end - scan); + bytes_written = tmp_file.WriteAtCurrentPos(scan, write_amount); + if (bytes_written != write_amount) { + UmaHistogramExactLinearWithSuffix( + "ImportantFile.FileWriteError", histogram_suffix, + -File::GetLastFileError(), -File::FILE_ERROR_MAX); + LogFailure( + path, histogram_suffix, FAILED_WRITING, + "error writing, bytes_written=" + NumberToString(bytes_written)); + DeleteTmpFileWithRetry(std::move(tmp_file), tmp_file_path, + histogram_suffix); + return false; + } } - bool flush_success = tmp_file.Flush(); - tmp_file.Close(); - if (bytes_written < data_length) { - LogFailure(path, histogram_suffix, FAILED_WRITING, - "error writing, bytes_written=" + NumberToString(bytes_written)); - DeleteTmpFile(tmp_file_path, histogram_suffix); - return false; - } - - if (!flush_success) { + if (!tmp_file.Flush()) { LogFailure(path, histogram_suffix, FAILED_FLUSHING, "error flushing"); - DeleteTmpFile(tmp_file_path, histogram_suffix); + DeleteTmpFileWithRetry(std::move(tmp_file), tmp_file_path, + histogram_suffix); return false; } - base::File::Error replace_file_error = base::File::FILE_OK; + File::Error replace_file_error = File::FILE_OK; + + // The file must be closed for ReplaceFile to do its job, which opens up a + // race with other software that may open the temp file (e.g., an A/V scanner + // doing its job without oplocks). Close as late as possible to improve the + // chances that the other software will lose the race. 
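// [Illustrative aside -- not part of the patch] The loop above bounds each
// write to 8 MiB so a single huge request cannot exhaust kernel address space
// on 32-bit Windows (crbug.com/1001022). The same idea reduced to a helper,
// assuming an already-open, writable base::File:
#include <algorithm>
#include <cstddef>

#include "base/files/file.h"
#include "base/strings/string_piece.h"

bool WriteInChunks(base::File& file, base::StringPiece data) {
  constexpr ptrdiff_t kMaxWriteAmount = 8 * 1024 * 1024;  // Mirrors the patch.
  for (const char* scan = data.data(), *const end = scan + data.size();
       scan < end;) {
    const int amount = static_cast<int>(std::min(kMaxWriteAmount, end - scan));
    const int written = file.WriteAtCurrentPos(scan, amount);
    if (written != amount)
      return false;  // Short write or error; the caller reports it.
    scan += written;
  }
  return true;
}
// [End aside]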
+ tmp_file.Close(); if (!ReplaceFile(tmp_file_path, path, &replace_file_error)) { UmaHistogramExactLinearWithSuffix("ImportantFile.FileRenameError", histogram_suffix, -replace_file_error, - -base::File::FILE_ERROR_MAX); + -File::FILE_ERROR_MAX); LogFailure(path, histogram_suffix, FAILED_RENAMING, "could not rename temporary file"); - DeleteTmpFile(tmp_file_path, histogram_suffix); + DeleteTmpFileWithRetry(File(), tmp_file_path, histogram_suffix); return false; } @@ -221,6 +294,9 @@ ImportantFileWriter::ImportantFileWriter( commit_interval_(interval), histogram_suffix_(histogram_suffix ? histogram_suffix : "") { DCHECK(task_runner_); +#if defined(OS_WIN) + ImportantFileWriterCleaner::AddDirectory(path.DirName()); +#endif } ImportantFileWriter::~ImportantFileWriter() { @@ -248,7 +324,9 @@ void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) { std::move(before_next_write_callback_), std::move(after_next_write_callback_), histogram_suffix_)); - if (!task_runner_->PostTask(FROM_HERE, MakeCriticalClosure(task))) { + if (!task_runner_->PostTask( + FROM_HERE, + MakeCriticalClosure("ImportantFileWriter::WriteNow", task))) { // Posting the task to background message loop is not expected // to fail, but if it does, avoid losing data and just hit the disk // on the current thread. diff --git a/chromium/base/files/important_file_writer.h b/chromium/base/files/important_file_writer.h index d3a42eff790..1accf1d817d 100644 --- a/chromium/base/files/important_file_writer.h +++ b/chromium/base/files/important_file_writer.h @@ -5,6 +5,7 @@ #ifndef BASE_FILES_IMPORTANT_FILE_WRITER_H_ #define BASE_FILES_IMPORTANT_FILE_WRITER_H_ +#include <memory> #include <string> #include "base/base_export.h" @@ -122,6 +123,25 @@ class BASE_EXPORT ImportantFileWriter { } OneShotTimer& timer() { return timer_override_ ? *timer_override_ : timer_; } + // Helper function to call WriteFileAtomically() with a + // std::unique_ptr<std::string>. + static void WriteScopedStringToFileAtomically( + const FilePath& path, + std::unique_ptr<std::string> data, + OnceClosure before_write_callback, + OnceCallback<void(bool success)> after_write_callback, + const std::string& histogram_suffix); + + // Writes |data| to |path|, recording histograms with an optional + // |histogram_suffix|. |from_instance| indicates whether the call originates + // from an instance of ImportantFileWriter or a direct call to + // WriteFileAtomically. When false, the directory containing |path| is added + // to the set cleaned by the ImportantFileWriterCleaner (Windows only). + static bool WriteFileAtomicallyImpl(const FilePath& path, + StringPiece data, + StringPiece histogram_suffix, + bool from_instance); + void ClearPendingWrite(); // Invoked synchronously on the next write event. diff --git a/chromium/base/files/important_file_writer_cleaner.cc b/chromium/base/files/important_file_writer_cleaner.cc new file mode 100644 index 00000000000..c7458e5ef54 --- /dev/null +++ b/chromium/base/files/important_file_writer_cleaner.cc @@ -0,0 +1,214 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
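// [Illustrative aside -- not part of the patch] DeleteTmpFileWithRetry() above
// retries by re-posting itself to the current sequence with a delay and an
// incremented attempt count, so a transient hold on the file (e.g. by an A/V
// scanner) does not leak the temp file forever. A reduced sketch of that
// pattern; the names RetryingDelete/kMaxAttempts/kRetryDelay are illustrative:
#include "base/bind.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/location.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"

void RetryingDelete(const base::FilePath& path, int attempt) {
  constexpr int kMaxAttempts = 8;
  constexpr base::TimeDelta kRetryDelay =
      base::TimeDelta::FromMilliseconds(250);
  if (base::DeleteFile(path, /*recursive=*/false))
    return;  // Done.
  if (attempt + 1 >= kMaxAttempts || !base::SequencedTaskRunnerHandle::IsSet())
    return;  // Out of retries; a real caller would record a metric here.
  base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::BindOnce(&RetryingDelete, path, attempt + 1),
      kRetryDelay);
}
// [End aside]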
+ +#include "base/files/important_file_writer_cleaner.h" + +#include <algorithm> +#include <functional> +#include <iterator> +#include <utility> + +#include "base/bind.h" +#include "base/files/file_enumerator.h" +#include "base/files/file_util.h" +#include "base/metrics/histogram_functions.h" +#include "base/process/process.h" +#include "base/task/task_traits.h" +#include "base/task/thread_pool.h" +#include "base/threading/sequenced_task_runner_handle.h" + +namespace base { + +// static +ImportantFileWriterCleaner& ImportantFileWriterCleaner::GetInstance() { + static NoDestructor<ImportantFileWriterCleaner> instance; + return *instance; +} + +// static +void ImportantFileWriterCleaner::AddDirectory(const FilePath& directory) { + auto& instance = GetInstance(); + scoped_refptr<SequencedTaskRunner> task_runner; + { + AutoLock scoped_lock(instance.task_runner_lock_); + task_runner = instance.task_runner_; + } + if (!task_runner) + return; + if (task_runner->RunsTasksInCurrentSequence()) { + instance.AddDirectoryImpl(directory); + } else { + // Unretained is safe here since the cleaner instance is never destroyed. + task_runner->PostTask( + FROM_HERE, BindOnce(&ImportantFileWriterCleaner::AddDirectoryImpl, + Unretained(&instance), directory)); + } +} + +void ImportantFileWriterCleaner::Initialize() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + AutoLock scoped_lock(task_runner_lock_); + DCHECK(!task_runner_ || task_runner_ == SequencedTaskRunnerHandle::Get()); + task_runner_ = SequencedTaskRunnerHandle::Get(); +} + +void ImportantFileWriterCleaner::Start() { +#if DCHECK_IS_ON() + { + AutoLock scoped_lock(task_runner_lock_); + DCHECK(task_runner_); + } +#endif + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + if (is_started()) + return; + + started_ = true; + + if (!pending_directories_.empty()) + ScheduleTask(); +} + +void ImportantFileWriterCleaner::Stop() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + if (!is_started()) + return; + + if (is_running()) + stop_flag_.store(true, std::memory_order_relaxed); + else + DoStop(); +} + +void ImportantFileWriterCleaner::UninitializeForTesting() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(!is_started()); + { + AutoLock scoped_lock(task_runner_lock_); + task_runner_ = nullptr; + } + // AddDirectory may have been called after Stop. Clear the containers just in + // case. + important_directories_.clear(); + pending_directories_.clear(); + DETACH_FROM_SEQUENCE(sequence_checker_); +} + +ImportantFileWriterCleaner::ImportantFileWriterCleaner() + : upper_bound_time_(Process::Current().CreationTime()) { + DETACH_FROM_SEQUENCE(sequence_checker_); +} + +void ImportantFileWriterCleaner::AddDirectoryImpl(const FilePath& directory) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + if (!important_directories_.insert(directory).second) + return; // This directory has already been seen. + + pending_directories_.push_back(directory); + + if (!is_started()) + return; // Nothing more to do if Start() has not been called. + + // Start the background task if it's not already running. If it is running, a + // new task will be posted on completion of the current one by + // OnBackgroundTaskFinished to handle all directories added while it was + // running. 
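// [Illustrative aside -- not part of the patch] The expected call pattern for
// the cleaner from a hosting process's main sequence, per the class comment in
// the header further below. The function names and the profile-dir argument
// are assumptions; only the Initialize()/Start()/AddDirectory()/Stop()
// sequence comes from this patch.
#include "base/files/file_path.h"
#include "base/files/important_file_writer_cleaner.h"

void OnMainSequenceStartup(const base::FilePath& profile_dir) {
  auto& cleaner = base::ImportantFileWriterCleaner::GetInstance();
  cleaner.Initialize();  // Captures the current (main) sequence's task runner.
  cleaner.Start();       // Begins cleaning any directories added so far.
  // ImportantFileWriter instances register their directories automatically;
  // a directory can also be added directly:
  base::ImportantFileWriterCleaner::AddDirectory(profile_dir);
}

void OnMainSequenceShutdown() {
  // Lets an in-progress scan finish its current file and record metrics.
  base::ImportantFileWriterCleaner::GetInstance().Stop();
}
// [End aside]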
+ if (!is_running()) + ScheduleTask(); +} + +void ImportantFileWriterCleaner::ScheduleTask() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(is_started()); + DCHECK(!is_running()); + DCHECK(!pending_directories_.empty()); + DCHECK(!stop_flag_.load(std::memory_order_relaxed)); + + // Pass the set of directories to be processed. + running_ = ThreadPool::PostTaskAndReplyWithResult( + FROM_HERE, + {TaskPriority::BEST_EFFORT, TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN, + MayBlock()}, + BindOnce(&ImportantFileWriterCleaner::CleanInBackground, + upper_bound_time_, std::move(pending_directories_), + std::ref(stop_flag_)), + // Unretained is safe here since the cleaner instance is never destroyed. + BindOnce(&ImportantFileWriterCleaner::OnBackgroundTaskFinished, + Unretained(this))); +} + +// static +bool ImportantFileWriterCleaner::CleanInBackground( + Time upper_bound_time, + std::vector<FilePath> directories, + std::atomic_bool& stop_flag) { + DCHECK(!directories.empty()); + bool stop = false; + for (auto scan = directories.begin(), end = directories.end(); scan != end; + ++scan) { + const auto& directory = *scan; + ClampedNumeric<int> successes; + ClampedNumeric<int> fails; + FileEnumerator file_enum(directory, /*recursive=*/false, + FileEnumerator::FILES, FILE_PATH_LITERAL("*.tmp")); + for (FilePath path = file_enum.Next(); !path.empty(); + path = file_enum.Next()) { + const FileEnumerator::FileInfo info = file_enum.GetInfo(); + if (info.GetLastModifiedTime() >= upper_bound_time) + continue; + if (DeleteFile(path, /*recursive=*/false)) + ++successes; + else + ++fails; + // Break out without checking for the next file if a stop is requested. + stop = stop_flag.load(std::memory_order_relaxed); + if (stop) + break; + } + // Record metrics for this directory regardless of whether it was fully + // processed or if the cleaner is being stopped. + if (successes != 0 || fails != 0) { + UmaHistogramCounts1M("Windows.TmpFileDeleter.SuccessCount", successes); + UmaHistogramCounts1M("Windows.TmpFileDeleter.FailCount", fails); + } + if (stop) + return false; + } + return true; +} + +void ImportantFileWriterCleaner::OnBackgroundTaskFinished( + bool processing_completed) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + running_ = false; + + // There are no other accessors of |stop_flag_| at this point, so atomic + // operations aren't needed. There is no way to read it without such, so use + // the same (relaxed) ordering as elsewhere. + const bool stop = stop_flag_.exchange(false, std::memory_order_relaxed); + DCHECK(stop || processing_completed); + + if (stop) { + DoStop(); + } else if (!pending_directories_.empty()) { + // Run the task again with the new directories. + ScheduleTask(); + } // else do nothing until a new directory is added. +} + +void ImportantFileWriterCleaner::DoStop() { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + DCHECK(is_started()); + DCHECK(!is_running()); + + important_directories_.clear(); + pending_directories_.clear(); + started_ = false; +} + +} // namespace base diff --git a/chromium/base/files/important_file_writer_cleaner.h b/chromium/base/files/important_file_writer_cleaner.h new file mode 100644 index 00000000000..1cea7418474 --- /dev/null +++ b/chromium/base/files/important_file_writer_cleaner.h @@ -0,0 +1,150 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
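// [Illustrative aside -- not part of the patch] The core of CleanInBackground()
// above is a non-recursive FileEnumerator walk over "*.tmp" files that deletes
// only those modified before a cutoff (the process creation time), checking an
// atomic stop flag between deletions so Stop() can interrupt it cooperatively.
// Reduced to a standalone helper with an assumed signature:
#include <atomic>

#include "base/files/file_enumerator.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/time/time.h"

// Returns false if the walk stopped early because |stop_flag| was set.
bool CleanDirectory(const base::FilePath& dir,
                    base::Time cutoff,
                    const std::atomic_bool& stop_flag) {
  base::FileEnumerator files(dir, /*recursive=*/false,
                             base::FileEnumerator::FILES,
                             FILE_PATH_LITERAL("*.tmp"));
  for (base::FilePath path = files.Next(); !path.empty();
       path = files.Next()) {
    if (files.GetInfo().GetLastModifiedTime() >= cutoff)
      continue;  // At least as new as the cutoff; may still be in use.
    base::DeleteFile(path, /*recursive=*/false);  // Failure counting elided.
    if (stop_flag.load(std::memory_order_relaxed))
      return false;  // Stopped early; caller records partial metrics.
  }
  return true;
}
// [End aside]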
+ +#ifndef BASE_FILES_IMPORTANT_FILE_WRITER_CLEANER_H_ +#define BASE_FILES_IMPORTANT_FILE_WRITER_CLEANER_H_ + +#include <atomic> +#include <vector> + +#include "base/base_export.h" +#include "base/containers/flat_set.h" +#include "base/files/file_path.h" +#include "base/memory/scoped_refptr.h" +#include "base/no_destructor.h" +#include "base/numerics/clamped_math.h" +#include "base/optional.h" +#include "base/sequence_checker.h" +#include "base/synchronization/lock.h" +#include "base/thread_annotations.h" +#include "base/time/time.h" + +namespace base { + +class SequencedTaskRunner; + +// A cleaner for forgotten .tmp files left behind by ImportantFileWriter; see +// https://crbug.com/1075917. +// +// ImportantFileWriter has the potential to leak .tmp files in case of a crash +// or power failure during processing, or in case of interference by third-party +// software. This class implements a singleton that makes a single scan over +// given directories to delete any *.tmp files older than the current process. +// Processes that use ImportantFileWriter are expected to call the instance's +// Start method at some point during startup to enable the cleaner. +// ImportantFileWriter calls the AddDirectory method to provide the directory +// hosting an "important" file. Hosting processes are expected to call the Stop +// method at shutdown so that metrics of an in-process execution can be +// recorded. +// +// The deletion scan takes place in a background task. Metrics are recorded when +// a directory is fully processed, or shortly after the hosting process calls +// the Stop method. +class BASE_EXPORT ImportantFileWriterCleaner { + public: + // Gets the process-wide single instance of the cleaner. + static ImportantFileWriterCleaner& GetInstance(); + + ImportantFileWriterCleaner(const ImportantFileWriterCleaner&) = delete; + ImportantFileWriterCleaner& operator=(const ImportantFileWriterCleaner&) = + delete; + ~ImportantFileWriterCleaner() = delete; + + // Adds |directory| to the set to be cleaned if it has not already been + // handled. If the Start method has already been called, the cleaner will + // begin processing |directory| after all others that have previously been + // added have been cleaned (immediately, if there are no others). Any calls to + // this method prior to Initialize are ignored. + static void AddDirectory(const FilePath& directory); + + // Initializes the instance on the hosting process's main sequence (the one on + // which Start and Stop will ultimately be called). It is safe to call this + // any number of times from the main sequence. + void Initialize(); + + // Starts the instance. If any directories have already been added, the + // background task is posted immediately to begin processing them. Otherwise, + // the next call to AddDirectory will begin processing. + void Start(); + + // Stops the instance. The background task, if it is active, is notified to + // record metrics on the directory in progress and exit. + void Stop(); + + // Brings the instance back to the uninitialized state. This should be used in + // tests that call Initialize so that the instance forgets about the test's + // main thread task runner. 
+ void UninitializeForTesting(); + + private: + friend class NoDestructor<ImportantFileWriterCleaner>; + + ImportantFileWriterCleaner(); + + // True once Start() has been called; false following Stop(); + bool is_started() const { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + return started_; + } + + // True once the background task has been posted; false once it returns. + bool is_running() const { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + return running_; + } + + // The workhorse for AddDirectory. + void AddDirectoryImpl(const FilePath& directory); + + // Schedules the background task to run, processing all directories that have + // accumulated. + void ScheduleTask(); + + // Iterates over the contents of |directories|, deleting all *.tmp files older + // than |upper_bound_time|. Checks |stop_flag| after each deletion to see if + // the instance has been stopped by the host process. Returns false if + // processing was interrupted by |stop_flag| having been set, or true + // indicating that all directories were fully processed. + static bool CleanInBackground(Time upper_bound_time, + std::vector<FilePath> directories, + std::atomic_bool& stop_flag); + + // Cleans up after completion of the background task. |processing_completed| + // is true when all directories were fully processed, or false if the task + // potentially exited early in response to Stop(). + void OnBackgroundTaskFinished(bool processing_completed); + + // Finalizes a request to stop after the background task returns. + void DoStop(); + + // Provides exclusive access to the instance's task runner. + Lock task_runner_lock_; + + // The hosting process's main thread task runner. + scoped_refptr<SequencedTaskRunner> task_runner_ GUARDED_BY(task_runner_lock_); + + // The time before which any discovered temporary file is presumed to be + // unused, and therefore safe to delete. + const Time upper_bound_time_; + + // The set of all directories hosting files written by an ImportantFileWriter. + flat_set<FilePath> important_directories_ + GUARDED_BY_CONTEXT(sequence_checker_); + + // Directories added to the instance waiting either for a call to Start() or + // waiting for an existing background task to complete. + std::vector<FilePath> pending_directories_ + GUARDED_BY_CONTEXT(sequence_checker_); + + std::atomic_bool stop_flag_{false}; + + bool started_ GUARDED_BY_CONTEXT(sequence_checker_) = false; + bool running_ GUARDED_BY_CONTEXT(sequence_checker_) = false; + + SEQUENCE_CHECKER(sequence_checker_); +}; + +} // namespace base + +#endif // BASE_FILES_IMPORTANT_FILE_WRITER_CLEANER_H_ diff --git a/chromium/base/files/important_file_writer_cleaner_unittest.cc b/chromium/base/files/important_file_writer_cleaner_unittest.cc new file mode 100644 index 00000000000..6b0d6a5e746 --- /dev/null +++ b/chromium/base/files/important_file_writer_cleaner_unittest.cc @@ -0,0 +1,350 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/files/important_file_writer_cleaner.h" + +#include "base/files/file.h" +#include "base/files/file_path.h" +#include "base/files/file_util.h" +#include "base/files/scoped_temp_dir.h" +#include "base/logging.h" +#include "base/optional.h" +#include "base/process/process.h" +#include "base/strings/stringprintf.h" +#include "base/task/thread_pool.h" +#include "base/test/bind_test_util.h" +#include "base/test/metrics/histogram_tester.h" +#include "base/test/task_environment.h" +#include "base/test/test_waitable_event.h" +#include "base/threading/sequenced_task_runner_handle.h" +#include "base/time/time.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using ::testing::ElementsAre; + +namespace base { + +class ImportantFileWriterCleanerTest : public ::testing::Test { + protected: + // Initializes and Starts the global cleaner at construction and Stops it + // at destruction. ("Lifetime" refers to its activity rather than existence.) + class ScopedCleanerLifetime { + public: + ScopedCleanerLifetime() { + auto& instance = ImportantFileWriterCleaner::GetInstance(); + instance.Initialize(); + instance.Start(); + } + ScopedCleanerLifetime(const ScopedCleanerLifetime&) = delete; + ScopedCleanerLifetime& operator=(const ScopedCleanerLifetime&) = delete; + ~ScopedCleanerLifetime() { + ImportantFileWriterCleaner::GetInstance().Stop(); + } + }; + + void SetUp() override; + void TearDown() override; + + const FilePath& dir_1() const { return dir_1_; } + const FilePath& dir_1_file_new() const { return dir_1_file_new_; } + const FilePath& dir_1_file_old() const { return dir_1_file_old_; } + const FilePath& dir_1_file_other() const { return dir_1_file_other_; } + const FilePath& dir_2() const { return dir_2_; } + const FilePath& dir_2_file_new() const { return dir_2_file_new_; } + const FilePath& dir_2_file_old() const { return dir_2_file_old_; } + const FilePath& dir_2_file_other() const { return dir_2_file_other_; } + + void StartCleaner() { + DCHECK(!cleaner_lifetime_.has_value()); + cleaner_lifetime_.emplace(); + } + + void StopCleaner() { + DCHECK(cleaner_lifetime_.has_value()); + cleaner_lifetime_.reset(); + } + + void CreateNewFile(const FilePath& path) { + File file(path, File::FLAG_CREATE | File::FLAG_WRITE); + ASSERT_TRUE(file.IsValid()); + } + + void CreateOldFile(const FilePath& path) { + const Time old_time = + Process::Current().CreationTime() - TimeDelta::FromSeconds(1); + File file(path, File::FLAG_CREATE | File::FLAG_WRITE); + ASSERT_TRUE(file.IsValid()); + ASSERT_TRUE(file.SetTimes(Time::Now(), old_time)); + } + + ScopedTempDir temp_dir_; + test::TaskEnvironment task_environment_; + HistogramTester histogram_tester_; + + private: + FilePath dir_1_; + FilePath dir_2_; + FilePath dir_1_file_new_; + FilePath dir_1_file_old_; + FilePath dir_1_file_other_; + FilePath dir_2_file_new_; + FilePath dir_2_file_old_; + FilePath dir_2_file_other_; + Optional<ScopedCleanerLifetime> cleaner_lifetime_; +}; + +void ImportantFileWriterCleanerTest::SetUp() { + ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); + + // Create two directories that will hold files to be cleaned. + dir_1_ = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("dir_1")); + ASSERT_TRUE(CreateDirectory(dir_1_)); + dir_2_ = temp_dir_.GetPath().Append(FILE_PATH_LITERAL("dir_2")); + ASSERT_TRUE(CreateDirectory(dir_2_)); + + // Create some old and new files in each dir. 
+ dir_1_file_new_ = dir_1_.Append(FILE_PATH_LITERAL("new.tmp")); + ASSERT_NO_FATAL_FAILURE(CreateNewFile(dir_1_file_new_)); + + dir_1_file_old_ = dir_1_.Append(FILE_PATH_LITERAL("old.tmp")); + ASSERT_NO_FATAL_FAILURE(CreateOldFile(dir_1_file_old_)); + + dir_1_file_other_ = dir_1_.Append(FILE_PATH_LITERAL("other.nottmp")); + ASSERT_NO_FATAL_FAILURE(CreateOldFile(dir_1_file_other_)); + + dir_2_file_new_ = dir_2_.Append(FILE_PATH_LITERAL("new.tmp")); + ASSERT_NO_FATAL_FAILURE(CreateNewFile(dir_2_file_new_)); + + dir_2_file_old_ = dir_2_.Append(FILE_PATH_LITERAL("old.tmp")); + ASSERT_NO_FATAL_FAILURE(CreateOldFile(dir_2_file_old_)); + + dir_2_file_other_ = dir_2_.Append(FILE_PATH_LITERAL("other.nottmp")); + ASSERT_NO_FATAL_FAILURE(CreateOldFile(dir_2_file_other_)); +} + +void ImportantFileWriterCleanerTest::TearDown() { + cleaner_lifetime_.reset(); + task_environment_.RunUntilIdle(); + ImportantFileWriterCleaner::GetInstance().UninitializeForTesting(); + EXPECT_TRUE(temp_dir_.Delete()); +} + +// Tests that adding a directory without initializing the cleaner does nothing. +TEST_F(ImportantFileWriterCleanerTest, NotInitializedNoOpAdd) { + ImportantFileWriterCleaner::AddDirectory(dir_1()); + task_environment_.RunUntilIdle(); + EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_TRUE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_TRUE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); +} + +// Tests that adding a directory without starting the cleaner does nothing. +TEST_F(ImportantFileWriterCleanerTest, NotStartedNoOpAdd) { + ImportantFileWriterCleaner::GetInstance().Initialize(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + task_environment_.RunUntilIdle(); + EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_TRUE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_TRUE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); +} + +// Tests that starting and stopping does no harm. +TEST_F(ImportantFileWriterCleanerTest, StartStop) { + StartCleaner(); + StopCleaner(); +} + +// Tests that adding a directory then starting the cleaner works. +TEST_F(ImportantFileWriterCleanerTest, AddStart) { + ImportantFileWriterCleaner::GetInstance().Initialize(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + StartCleaner(); + task_environment_.RunUntilIdle(); + + // The old file should have been cleaned from the added dir. + EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_FALSE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_TRUE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); + + // There should be 1 success and 0 failure logged for the one dir. + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 1); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 1); +} + +// Tests that adding multiple directories before starting cleans both. +TEST_F(ImportantFileWriterCleanerTest, AddAddStart) { + ImportantFileWriterCleaner::GetInstance().Initialize(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + ImportantFileWriterCleaner::AddDirectory(dir_2()); + StartCleaner(); + task_environment_.RunUntilIdle(); + + // The old file should have been cleaned from both added dirs. 
+ EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_FALSE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_FALSE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); + + // There should be 2 success and 2 failure samples (one for each dir). + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 2); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 2); +} + +// Tests that starting the cleaner then adding a directory works. +TEST_F(ImportantFileWriterCleanerTest, StartAdd) { + StartCleaner(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + task_environment_.RunUntilIdle(); + + // The old file should have been cleaned from the added dir. + EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_FALSE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_TRUE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); + + // There should be 1 success and 0 failure logged for the one dir. + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 1); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 1); +} + +// Tests that starting the cleaner twice doesn't cause it to clean twice. +TEST_F(ImportantFileWriterCleanerTest, StartTwice) { + StartCleaner(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + task_environment_.RunUntilIdle(); + + // Recreate the old file that was just cleaned. + ASSERT_NO_FATAL_FAILURE(CreateOldFile(dir_1_file_old())); + + // Start again and make sure it wasn't cleaned again. + ImportantFileWriterCleaner::GetInstance().Start(); + task_environment_.RunUntilIdle(); + + EXPECT_TRUE(PathExists(dir_1_file_old())); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 1); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 1); +} + +// Tests that adding a dir twice doesn't cause it to clean twice. +TEST_F(ImportantFileWriterCleanerTest, AddTwice) { + StartCleaner(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + task_environment_.RunUntilIdle(); + + // Recreate the old file that was just cleaned. + ASSERT_NO_FATAL_FAILURE(CreateOldFile(dir_1_file_old())); + + // Add the directory again and make sure nothing else is cleaned. + ImportantFileWriterCleaner::AddDirectory(dir_1()); + task_environment_.RunUntilIdle(); + + EXPECT_TRUE(PathExists(dir_1_file_old())); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 1); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 1); +} + +// Tests that AddDirectory called from another thread properly bounces back to +// the main thread for processing. +TEST_F(ImportantFileWriterCleanerTest, StartAddFromOtherThread) { + StartCleaner(); + + // Add from the ThreadPool and wait for it to finish. + TestWaitableEvent waitable_event; + ThreadPool::PostTask(FROM_HERE, BindLambdaForTesting([&]() { + ImportantFileWriterCleaner::AddDirectory(dir_1()); + waitable_event.Signal(); + })); + waitable_event.Wait(); + + // Allow the cleaner to run. + task_environment_.RunUntilIdle(); + + // The old file should have been cleaned from the added dir. 
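// [Illustrative aside -- not part of the patch] The test above calls
// AddDirectory() from the ThreadPool; the cleaner handles this by hopping to
// the sequence captured in Initialize() unless it is already running there.
// In the patch the posted closure binds Unretained(&instance), which is safe
// only because the NoDestructor singleton is never destroyed. The hop itself,
// reduced to a generic sketch:
#include <utility>

#include "base/callback.h"
#include "base/location.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequenced_task_runner.h"

void PostOrRunOnOwningSequence(scoped_refptr<base::SequencedTaskRunner> owner,
                               base::OnceClosure work) {
  if (owner->RunsTasksInCurrentSequence())
    std::move(work).Run();  // Already on the owning sequence; run inline.
  else
    owner->PostTask(FROM_HERE, std::move(work));
}
// [End aside]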
+ EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_FALSE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_TRUE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); + + // There should be 1 success and 0 failure logged for the one dir. + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 1); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 1); +} + +// Tests that adding a directory while a session is processing a previous +// directory works. +TEST_F(ImportantFileWriterCleanerTest, AddStartAdd) { + ImportantFileWriterCleaner::GetInstance().Initialize(); + ImportantFileWriterCleaner::AddDirectory(dir_1()); + StartCleaner(); + ImportantFileWriterCleaner::AddDirectory(dir_2()); + task_environment_.RunUntilIdle(); + + // The old file should have been cleaned from both added dirs. + EXPECT_TRUE(PathExists(dir_1_file_new())); + EXPECT_FALSE(PathExists(dir_1_file_old())); + EXPECT_TRUE(PathExists(dir_1_file_other())); + EXPECT_TRUE(PathExists(dir_2_file_new())); + EXPECT_FALSE(PathExists(dir_2_file_old())); + EXPECT_TRUE(PathExists(dir_2_file_other())); + + // There should be 2 success and 2 failure samples (one for each dir). + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.SuccessCount", 1, + 2); + histogram_tester_.ExpectUniqueSample("Windows.TmpFileDeleter.FailCount", 0, + 2); +} + +// Tests that stopping while the background task is running results in at least +// recording of partial metrics. +TEST_F(ImportantFileWriterCleanerTest, StopWhileRunning) { + ImportantFileWriterCleaner::GetInstance().Initialize(); + + // Create a great many old files in dir1. + for (int i = 0; i < 100; ++i) { + CreateOldFile( + dir_1().Append(StringPrintf(FILE_PATH_LITERAL("oldie%d.tmp"), i))); + } + + ImportantFileWriterCleaner::AddDirectory(dir_1()); + StartCleaner(); + + // It's possible that the background task will quickly delete all 100 files. + // In all likelihood, though, the stop flag will be read and processed before + // then. Either case is a success. + StopCleaner(); + task_environment_.RunUntilIdle(); + + // Expect a single sample indicating that one or more files were deleted. + histogram_tester_.ExpectTotalCount("Windows.TmpFileDeleter.SuccessCount", 1); +} + +} // namespace base diff --git a/chromium/base/files/important_file_writer_unittest.cc b/chromium/base/files/important_file_writer_unittest.cc index 1d6b6336f68..baa1d5d7139 100644 --- a/chromium/base/files/important_file_writer_unittest.cc +++ b/chromium/base/files/important_file_writer_unittest.cc @@ -10,9 +10,9 @@ #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" #include "base/location.h" -#include "base/logging.h" #include "base/macros.h" #include "base/memory/ptr_util.h" +#include "base/notreached.h" #include "base/run_loop.h" #include "base/single_thread_task_runner.h" #include "base/test/metrics/histogram_tester.h" @@ -347,4 +347,15 @@ TEST_F(ImportantFileWriterTest, WriteFileAtomicallyHistogramSuffixTest) { histogram_tester.ExpectTotalCount("ImportantFile.FileCreateError.test", 1); } +// Test that the chunking to avoid very large writes works. +TEST_F(ImportantFileWriterTest, WriteLargeFile) { + // One byte larger than kMaxWriteAmount. 
+ const std::string large_data(8 * 1024 * 1024 + 1, 'g'); + EXPECT_FALSE(PathExists(file_)); + EXPECT_TRUE(ImportantFileWriter::WriteFileAtomically(file_, large_data)); + std::string actual; + EXPECT_TRUE(ReadFileToString(file_, &actual)); + EXPECT_EQ(large_data, actual); +} + } // namespace base diff --git a/chromium/base/files/scoped_file.cc b/chromium/base/files/scoped_file.cc index 1b9227d9779..aa353f00474 100644 --- a/chromium/base/files/scoped_file.cc +++ b/chromium/base/files/scoped_file.cc @@ -4,7 +4,7 @@ #include "base/files/scoped_file.h" -#include "base/logging.h" +#include "base/check.h" #include "build/build_config.h" #if defined(OS_POSIX) || defined(OS_FUCHSIA) diff --git a/chromium/base/fuchsia/OWNERS b/chromium/base/fuchsia/OWNERS index c1b584511a6..2f2f09cbf1f 100644 --- a/chromium/base/fuchsia/OWNERS +++ b/chromium/base/fuchsia/OWNERS @@ -2,3 +2,7 @@ file://build/fuchsia/OWNERS # COMPONENT: Fuchsia # OS: Fuchsia # TEAM: cr-fuchsia@chromium.org + +# FilteredServiceDirectory is part of the Fuchsia sandbox. +per-file filtered_service_directory.*=set noparent +per-file filtered_service_directory.*=file://fuchsia/SECURITY_OWNERS diff --git a/chromium/base/fuchsia/default_context.cc b/chromium/base/fuchsia/default_context.cc index 654a9cb4720..932415422f1 100644 --- a/chromium/base/fuchsia/default_context.cc +++ b/chromium/base/fuchsia/default_context.cc @@ -7,7 +7,6 @@ #include <lib/sys/cpp/component_context.h> #include "base/fuchsia/file_utils.h" -#include "base/logging.h" #include "base/no_destructor.h" namespace base { diff --git a/chromium/base/fuchsia/default_job.cc b/chromium/base/fuchsia/default_job.cc index b23d1d1f457..3112b65c326 100644 --- a/chromium/base/fuchsia/default_job.cc +++ b/chromium/base/fuchsia/default_job.cc @@ -6,7 +6,7 @@ #include <zircon/types.h> -#include "base/logging.h" +#include "base/check_op.h" namespace base { diff --git a/chromium/base/fuchsia/file_utils_unittest.cc b/chromium/base/fuchsia/file_utils_unittest.cc index 11bb1a25027..810732bdbed 100644 --- a/chromium/base/fuchsia/file_utils_unittest.cc +++ b/chromium/base/fuchsia/file_utils_unittest.cc @@ -35,10 +35,10 @@ TEST_F(OpenDirectoryTest, OpenNonExistent) { // OpenDirectory() should open only directories. TEST_F(OpenDirectoryTest, OpenFile) { auto file_path = temp_dir.GetPath().AppendASCII("test_file"); - ASSERT_TRUE(WriteFile(file_path, "foo", 3)); + ASSERT_TRUE(WriteFile(file_path, "foo")); auto dir = OpenDirectory(file_path); ASSERT_FALSE(dir); } } // namespace fuchsia -} // namespace base
\ No newline at end of file +} // namespace base diff --git a/chromium/base/fuchsia/fuchsia_logging.h b/chromium/base/fuchsia/fuchsia_logging.h index ba55f8db86e..46d33e774f3 100644 --- a/chromium/base/fuchsia/fuchsia_logging.h +++ b/chromium/base/fuchsia/fuchsia_logging.h @@ -24,7 +24,7 @@ class BASE_EXPORT ZxLogMessage : public logging::LogMessage { int line, LogSeverity severity, zx_status_t zx_err); - ~ZxLogMessage(); + ~ZxLogMessage() override; private: zx_status_t zx_err_; diff --git a/chromium/base/fuchsia/intl_profile_watcher_unittest.cc b/chromium/base/fuchsia/intl_profile_watcher_unittest.cc index 23d85431602..110e5bfdd1b 100644 --- a/chromium/base/fuchsia/intl_profile_watcher_unittest.cc +++ b/chromium/base/fuchsia/intl_profile_watcher_unittest.cc @@ -10,7 +10,7 @@ #include <string> #include <vector> -#include "base/logging.h" +#include "base/check.h" #include "base/memory/ptr_util.h" #include "base/run_loop.h" #include "base/test/bind_test_util.h" diff --git a/chromium/base/fuchsia/startup_context.cc b/chromium/base/fuchsia/startup_context.cc index 156b6a207a7..7d1e0c2ac1e 100644 --- a/chromium/base/fuchsia/startup_context.cc +++ b/chromium/base/fuchsia/startup_context.cc @@ -9,6 +9,8 @@ #include <lib/sys/cpp/service_directory.h> #include "base/fuchsia/file_utils.h" +#include "base/logging.h" +#include "base/macros.h" namespace base { namespace fuchsia { diff --git a/chromium/base/fuchsia/test_component_context_for_process.cc b/chromium/base/fuchsia/test_component_context_for_process.cc index 22a304994ea..3f2f9ea2a39 100644 --- a/chromium/base/fuchsia/test_component_context_for_process.cc +++ b/chromium/base/fuchsia/test_component_context_for_process.cc @@ -9,6 +9,7 @@ #include <lib/fidl/cpp/interface_handle.h> #include <lib/sys/cpp/component_context.h> +#include "base/files/file_enumerator.h" #include "base/fuchsia/default_context.h" #include "base/fuchsia/filtered_service_directory.h" #include "base/fuchsia/fuchsia_logging.h" @@ -16,7 +17,8 @@ namespace base { -TestComponentContextForProcess::TestComponentContextForProcess() { +TestComponentContextForProcess::TestComponentContextForProcess( + InitialState initial_state) { // TODO(https://crbug.com/1038786): Migrate to sys::ComponentContextProvider // once it provides access to an sys::OutgoingDirectory or PseudoDir through // which to publish additional_services(). @@ -26,6 +28,15 @@ TestComponentContextForProcess::TestComponentContextForProcess() { context_services_ = std::make_unique<fuchsia::FilteredServiceDirectory>( base::fuchsia::ComponentContextForCurrentProcess()->svc().get()); + // Push all services from /svc to the test context if requested. + if (initial_state == InitialState::kCloneAll) { + base::FileEnumerator file_enum(base::FilePath("/svc"), false, + base::FileEnumerator::FILES); + for (auto file = file_enum.Next(); !file.empty(); file = file_enum.Next()) { + AddService(file.BaseName().value()); + } + } + // Create a ServiceDirectory backed by the contents of |incoming_directory|. 
fidl::InterfaceHandle<::fuchsia::io::Directory> incoming_directory; context_services_->ConnectClient(incoming_directory.NewRequest()); @@ -60,10 +71,15 @@ sys::OutgoingDirectory* TestComponentContextForProcess::additional_services() { return context_services_->outgoing_directory(); } +void TestComponentContextForProcess::AddService( + const base::StringPiece service) { + context_services_->AddService(service); +} + void TestComponentContextForProcess::AddServices( base::span<const base::StringPiece> services) { for (auto service : services) - context_services_->AddService(service); + AddService(service); } } // namespace base diff --git a/chromium/base/fuchsia/test_component_context_for_process.h b/chromium/base/fuchsia/test_component_context_for_process.h index f2e2b42699b..0c3544780ba 100644 --- a/chromium/base/fuchsia/test_component_context_for_process.h +++ b/chromium/base/fuchsia/test_component_context_for_process.h @@ -41,18 +41,25 @@ class FilteredServiceDirectory; // test base-class: // // TEST(MyFunkyTest, IsFunky) { -// TestComponentContextForTest test_context; +// TestComponentContextForProcess test_context; // // Configure the |test_context|. // // Run tests of code that uses ComponentContextForProcess(). // } // -// Services from the original process-global ComponentContext (usually the -// environment in which the test process is running), can be exposed through the -// |test_context| with AddServices(), during test setup: +// By default created context doesn't expose any services. Services from the +// original process-global ComponentContext (usually the environment in which +// the test process is running), can be exposed through the |test_context| with +// AddServices(), during test setup: // // test_context.AddServices({fuchsia::memorypressure::Provider::Name_, ...}); // // ... Execute tests which use fuchsia.memorypressure.Provider ... // +// Alternatively InitialState::kEmpty can be passed to the constructor to expose +// all services listed in /svc, e.g.: +// +// TestComponentContextForProcess test_context( +// TestComponentContextForProcess::InitialState::kEmpty); +// // Fake/mock implementations can be exposed via additional_services(): // // ScopedServiceBinding<funky::Service> binding( @@ -68,7 +75,13 @@ class FilteredServiceDirectory; // class BASE_EXPORT TestComponentContextForProcess { public: - TestComponentContextForProcess(); + enum class InitialState { + kEmpty, + kCloneAll, + }; + + TestComponentContextForProcess( + InitialState initial_state = InitialState::kEmpty); ~TestComponentContextForProcess(); TestComponentContextForProcess(const TestComponentContextForProcess&) = @@ -80,8 +93,9 @@ class BASE_EXPORT TestComponentContextForProcess { // published for use by the code-under test. sys::OutgoingDirectory* additional_services(); - // Allows the specified services from the original ComponentContext to be + // Allows the specified service(s) from the original ComponentContext to be // exposed via the test default ComponentContext. 
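// [Illustrative aside -- not part of the patch] With the new InitialState
// parameter, a test can either start from an empty service directory (the
// default) and expose services selectively via AddService()/AddServices()
// declared just below, or clone everything the test process received under
// /svc. Note that, per the constructor change earlier in this patch,
// InitialState::kCloneAll is the value that clones /svc; the default is
// kEmpty. A minimal sketch; the service name is just an example:
#include "base/fuchsia/test_component_context_for_process.h"

void ConfigureEmptyContext() {
  // Default: nothing is exposed until AddService()/AddServices() is called.
  base::TestComponentContextForProcess test_context;
  test_context.AddService("fuchsia.intl.PropertyProvider");
}

void ConfigureClonedContext() {
  // Clone every service the test process itself received under /svc.
  base::TestComponentContextForProcess test_context(
      base::TestComponentContextForProcess::InitialState::kCloneAll);
}
// [End aside]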
+ void AddService(const base::StringPiece service); void AddServices(base::span<const base::StringPiece> services); // Returns the directory of services that the code under test has published diff --git a/chromium/base/fuchsia/time_zone_data_unittest.cc b/chromium/base/fuchsia/time_zone_data_unittest.cc index bb6f4ae668e..d90ac387dcf 100644 --- a/chromium/base/fuchsia/time_zone_data_unittest.cc +++ b/chromium/base/fuchsia/time_zone_data_unittest.cc @@ -5,6 +5,7 @@ #include "base/i18n/icu_util.h" #include "base/files/file_util.h" +#include "base/logging.h" #include "base/strings/string_util.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/hash/hash.cc b/chromium/base/hash/hash.cc index c96f8bc8434..d038c4735e1 100644 --- a/chromium/base/hash/hash.cc +++ b/chromium/base/hash/hash.cc @@ -4,6 +4,8 @@ #include "base/hash/hash.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "base/rand_util.h" #include "base/third_party/cityhash/city.h" #include "build/build_config.h" diff --git a/chromium/base/i18n/break_iterator.cc b/chromium/base/i18n/break_iterator.cc index 51ffc4a1520..2593dfb9bc2 100644 --- a/chromium/base/i18n/break_iterator.cc +++ b/chromium/base/i18n/break_iterator.cc @@ -6,7 +6,10 @@ #include <stdint.h> -#include "base/logging.h" +#include "base/check.h" +#include "base/lazy_instance.h" +#include "base/notreached.h" +#include "base/synchronization/lock.h" #include "third_party/icu/source/common/unicode/ubrk.h" #include "third_party/icu/source/common/unicode/uchar.h" #include "third_party/icu/source/common/unicode/ustring.h" @@ -31,58 +34,161 @@ BreakIterator::BreakIterator(const StringPiece16& str, const string16& rules) prev_(npos), pos_(0) {} +namespace { + +// We found the usage pattern of break iterator is to create, use and destroy. +// The following cache support multiple break iterator in the same thread and +// also optimize to not create break iterator many time. For each kind of break +// iterator (character, word, line and sentence, but NOT rule), we keep one of +// them in the main_ and lease it out. If some other code request a lease +// before |main_| is returned, we create a new instance of the iterator. +// This will keep at most 4 break iterators (one for each kind) unreleased until +// the program destruction time. +template <UBreakIteratorType break_type> +class DefaultLocaleBreakIteratorCache { + public: + DefaultLocaleBreakIteratorCache() + : main_status_(U_ZERO_ERROR), + main_(nullptr), + main_could_be_leased_(true) { + main_ = ubrk_open(break_type, nullptr, nullptr, 0, &main_status_); + if (U_FAILURE(main_status_)) { + NOTREACHED() << "ubrk_open failed for type " << break_type + << " with error " << main_status_; + } + } + + virtual ~DefaultLocaleBreakIteratorCache() { ubrk_close(main_); } + + UBreakIterator* Lease(UErrorCode& status) { + if (U_FAILURE(status)) { + return nullptr; + } + if (U_FAILURE(main_status_)) { + status = main_status_; + return nullptr; + } + { + AutoLock scoped_lock(lock_); + if (main_could_be_leased_) { + // Just lease the main_ out. + main_could_be_leased_ = false; + return main_; + } + } + // The main_ is already leased out to some other places, return a new + // object instead. 
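// [Illustrative aside -- not part of the patch] The cache above targets the
// common create-use-destroy pattern for default-locale break iterators: each
// non-rule BreakIterator now leases a shared UBreakIterator rather than
// calling ubrk_open() every time. Typical calling code, for reference:
#include <vector>

#include "base/i18n/break_iterator.h"
#include "base/strings/string16.h"

std::vector<base::string16> SplitIntoWords(const base::string16& text) {
  std::vector<base::string16> words;
  base::i18n::BreakIterator iter(text, base::i18n::BreakIterator::BREAK_WORD);
  if (!iter.Init())
    return words;
  while (iter.Advance()) {
    if (iter.IsWord())
      words.push_back(iter.GetString());
  }
  return words;
}
// [End aside]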
+ UBreakIterator* result = + ubrk_open(break_type, nullptr, nullptr, 0, &status); + if (U_FAILURE(status)) { + NOTREACHED() << "ubrk_open failed for type " << break_type + << " with error " << status; + } + return result; + } + + void Return(UBreakIterator* item) { + // If the return item is the main_, just remember we can lease it out + // next time. + if (item == main_) { + AutoLock scoped_lock(lock_); + main_could_be_leased_ = true; + } else { + // Close the item if it is not main_. + ubrk_close(item); + } + } + + private: + UErrorCode main_status_; + UBreakIterator* main_; + bool main_could_be_leased_; + Lock lock_; +}; + +static LazyInstance<DefaultLocaleBreakIteratorCache<UBRK_CHARACTER>>:: + DestructorAtExit char_break_cache = LAZY_INSTANCE_INITIALIZER; +static LazyInstance<DefaultLocaleBreakIteratorCache<UBRK_WORD>>:: + DestructorAtExit word_break_cache = LAZY_INSTANCE_INITIALIZER; +static LazyInstance<DefaultLocaleBreakIteratorCache<UBRK_SENTENCE>>:: + DestructorAtExit sentence_break_cache = LAZY_INSTANCE_INITIALIZER; +static LazyInstance<DefaultLocaleBreakIteratorCache<UBRK_LINE>>:: + DestructorAtExit line_break_cache = LAZY_INSTANCE_INITIALIZER; + +} // namespace + BreakIterator::~BreakIterator() { - if (iter_) - ubrk_close(static_cast<UBreakIterator*>(iter_)); + if (iter_) { + UBreakIterator* iter = static_cast<UBreakIterator*>(iter_); + switch (break_type_) { + // Free the iter if it is RULE_BASED + case RULE_BASED: + ubrk_close(iter); + break; + // Otherwise, return the iter to the cache it leased from.` + case BREAK_CHARACTER: + char_break_cache.Pointer()->Return(iter); + break; + case BREAK_WORD: + word_break_cache.Pointer()->Return(iter); + break; + case BREAK_SENTENCE: + sentence_break_cache.Pointer()->Return(iter); + break; + case BREAK_LINE: + case BREAK_NEWLINE: + line_break_cache.Pointer()->Return(iter); + break; + default: + NOTREACHED() << "invalid break_type_"; + break; + } + } } bool BreakIterator::Init() { UErrorCode status = U_ZERO_ERROR; UParseError parse_error; - UBreakIteratorType break_type; switch (break_type_) { case BREAK_CHARACTER: - break_type = UBRK_CHARACTER; + iter_ = char_break_cache.Pointer()->Lease(status); break; case BREAK_WORD: - break_type = UBRK_WORD; + iter_ = word_break_cache.Pointer()->Lease(status); break; case BREAK_SENTENCE: - break_type = UBRK_SENTENCE; + iter_ = sentence_break_cache.Pointer()->Lease(status); break; case BREAK_LINE: case BREAK_NEWLINE: - case RULE_BASED: // (Keep compiler happy, break_type not used in this case) - break_type = UBRK_LINE; + iter_ = line_break_cache.Pointer()->Lease(status); + break; + case RULE_BASED: + iter_ = + ubrk_openRules(rules_.c_str(), static_cast<int32_t>(rules_.length()), + nullptr, 0, &parse_error, &status); + if (U_FAILURE(status)) { + NOTREACHED() << "ubrk_openRules failed to parse rule string at line " + << parse_error.line << ", offset " << parse_error.offset; + } break; default: NOTREACHED() << "invalid break_type_"; return false; } - if (break_type_ == RULE_BASED) { - iter_ = ubrk_openRules(rules_.c_str(), - static_cast<int32_t>(rules_.length()), - string_.data(), - static_cast<int32_t>(string_.size()), - &parse_error, - &status); - if (U_FAILURE(status)) { - NOTREACHED() << "ubrk_openRules failed to parse rule string at line " - << parse_error.line << ", offset " << parse_error.offset; - } - } else { - iter_ = ubrk_open(break_type, nullptr, string_.data(), - static_cast<int32_t>(string_.size()), &status); - if (U_FAILURE(status)) { - NOTREACHED() << "ubrk_open failed for type " << 
break_type - << " with error " << status; - } - } - if (U_FAILURE(status)) { + if (U_FAILURE(status) || iter_ == nullptr) { return false; } + if (string_.data() != nullptr) { + ubrk_setText(static_cast<UBreakIterator*>(iter_), string_.data(), + static_cast<int32_t>(string_.size()), &status); + if (U_FAILURE(status)) { + return false; + } + } + // Move the iterator to the beginning of the string. ubrk_first(static_cast<UBreakIterator*>(iter_)); return true; diff --git a/chromium/base/i18n/char_iterator.cc b/chromium/base/i18n/char_iterator.cc index 7c298084663..7b7318c0790 100644 --- a/chromium/base/i18n/char_iterator.cc +++ b/chromium/base/i18n/char_iterator.cc @@ -4,7 +4,7 @@ #include "base/i18n/char_iterator.h" -#include "base/logging.h" +#include "base/check_op.h" #include "third_party/icu/source/common/unicode/utf16.h" #include "third_party/icu/source/common/unicode/utf8.h" diff --git a/chromium/base/i18n/file_util_icu.cc b/chromium/base/i18n/file_util_icu.cc index ded1015442c..f7525adf816 100644 --- a/chromium/base/i18n/file_util_icu.cc +++ b/chromium/base/i18n/file_util_icu.cc @@ -10,10 +10,10 @@ #include <memory> +#include "base/check.h" #include "base/files/file_path.h" #include "base/i18n/icu_string_conversions.h" #include "base/i18n/string_compare.h" -#include "base/logging.h" #include "base/macros.h" #include "base/memory/singleton.h" #include "base/strings/string_util.h" diff --git a/chromium/base/i18n/icu_string_conversions.cc b/chromium/base/i18n/icu_string_conversions.cc index 50321c25057..08a020ff44f 100644 --- a/chromium/base/i18n/icu_string_conversions.cc +++ b/chromium/base/i18n/icu_string_conversions.cc @@ -10,7 +10,8 @@ #include <memory> #include <vector> -#include "base/logging.h" +#include "base/check.h" +#include "base/notreached.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "third_party/icu/source/common/unicode/normalizer2.h" diff --git a/chromium/base/i18n/icu_string_conversions_unittest.cc b/chromium/base/i18n/icu_string_conversions_unittest.cc index 07a81727329..74b4f773c83 100644 --- a/chromium/base/i18n/icu_string_conversions_unittest.cc +++ b/chromium/base/i18n/icu_string_conversions_unittest.cc @@ -9,9 +9,9 @@ #include <limits> #include <sstream> +#include "base/check_op.h" #include "base/format_macros.h" #include "base/i18n/icu_string_conversions.h" -#include "base/logging.h" #include "base/stl_util.h" #include "base/strings/string_piece.h" #include "base/strings/stringprintf.h" diff --git a/chromium/base/i18n/number_formatting.cc b/chromium/base/i18n/number_formatting.cc index bb9c940c8f1..31d5e14d379 100644 --- a/chromium/base/i18n/number_formatting.cc +++ b/chromium/base/i18n/number_formatting.cc @@ -8,11 +8,11 @@ #include <memory> +#include "base/check.h" #include "base/format_macros.h" #include "base/i18n/message_formatter.h" #include "base/i18n/unicodestring.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" diff --git a/chromium/base/i18n/streaming_utf8_validator.cc b/chromium/base/i18n/streaming_utf8_validator.cc index 19c86a37a47..29c40ca6c9a 100644 --- a/chromium/base/i18n/streaming_utf8_validator.cc +++ b/chromium/base/i18n/streaming_utf8_validator.cc @@ -8,8 +8,8 @@ #include "base/i18n/streaming_utf8_validator.h" +#include "base/check_op.h" #include "base/i18n/utf8_validator_tables.h" -#include "base/logging.h" namespace base { namespace { diff --git 
a/chromium/base/i18n/string_compare.cc b/chromium/base/i18n/string_compare.cc index 6cd59b98f49..c0e83c6f730 100644 --- a/chromium/base/i18n/string_compare.cc +++ b/chromium/base/i18n/string_compare.cc @@ -4,7 +4,7 @@ #include "base/i18n/string_compare.h" -#include "base/logging.h" +#include "base/check.h" #include "base/strings/utf_string_conversions.h" #include "third_party/icu/source/common/unicode/unistr.h" diff --git a/chromium/base/i18n/string_search.cc b/chromium/base/i18n/string_search.cc index 8088adf2ce5..9f830d48924 100644 --- a/chromium/base/i18n/string_search.cc +++ b/chromium/base/i18n/string_search.cc @@ -5,7 +5,6 @@ #include <stdint.h> #include "base/i18n/string_search.h" -#include "base/logging.h" #include "third_party/icu/source/i18n/unicode/usearch.h" diff --git a/chromium/base/ios/crb_protocol_observers.mm b/chromium/base/ios/crb_protocol_observers.mm index 86a081e8fec..cd6a7ee8ad2 100644 --- a/chromium/base/ios/crb_protocol_observers.mm +++ b/chromium/base/ios/crb_protocol_observers.mm @@ -9,8 +9,9 @@ #include <algorithm> #include <vector> -#include "base/logging.h" +#include "base/check.h" #include "base/mac/scoped_nsobject.h" +#include "base/notreached.h" #include "base/stl_util.h" @interface CRBProtocolObservers () { diff --git a/chromium/base/ios/crb_protocol_observers_unittest.mm b/chromium/base/ios/crb_protocol_observers_unittest.mm index 3538c9e510f..f09c1259623 100644 --- a/chromium/base/ios/crb_protocol_observers_unittest.mm +++ b/chromium/base/ios/crb_protocol_observers_unittest.mm @@ -5,8 +5,8 @@ #import "base/ios/crb_protocol_observers.h" #include "base/ios/weak_nsobject.h" -#include "base/logging.h" #include "base/mac/scoped_nsobject.h" +#include "base/notreached.h" #include "testing/gtest/include/gtest/gtest.h" #include "testing/gtest_mac.h" #include "testing/platform_test.h" diff --git a/chromium/base/ios/device_util.mm b/chromium/base/ios/device_util.mm index 5ec1e69e2aa..e9c111109e5 100644 --- a/chromium/base/ios/device_util.mm +++ b/chromium/base/ios/device_util.mm @@ -15,7 +15,7 @@ #include <memory> -#include "base/logging.h" +#include "base/check.h" #include "base/mac/scoped_cftyperef.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" diff --git a/chromium/base/ios/ns_error_util.mm b/chromium/base/ios/ns_error_util.mm index c44d9ee4135..1bff3d4da9f 100644 --- a/chromium/base/ios/ns_error_util.mm +++ b/chromium/base/ios/ns_error_util.mm @@ -6,7 +6,7 @@ #import <Foundation/Foundation.h> -#include "base/logging.h" +#include "base/check.h" #include "base/mac/scoped_nsobject.h" namespace base { diff --git a/chromium/base/ios/scoped_critical_action.h b/chromium/base/ios/scoped_critical_action.h index 2f7d16c3f07..1954f9c6059 100644 --- a/chromium/base/ios/scoped_critical_action.h +++ b/chromium/base/ios/scoped_critical_action.h @@ -27,7 +27,7 @@ namespace ios { // save such data. class ScopedCriticalAction { public: - ScopedCriticalAction(); + ScopedCriticalAction(StringPiece task_name); ~ScopedCriticalAction(); private: @@ -41,7 +41,9 @@ class ScopedCriticalAction { // Informs the OS that the background task has started. This is a // static method to ensure that the instance has a non-zero refcount. - static void StartBackgroundTask(scoped_refptr<Core> core); + // |task_name| is used by the OS to log any leaked background tasks. + static void StartBackgroundTask(scoped_refptr<Core> core, + StringPiece task_name); // Informs the OS that the background task has completed. 
This is a // static method to ensure that the instance has a non-zero refcount. static void EndBackgroundTask(scoped_refptr<Core> core); @@ -51,10 +53,10 @@ class ScopedCriticalAction { ~Core(); // |UIBackgroundTaskIdentifier| returned by - // |beginBackgroundTaskWithExpirationHandler:| when marking the beginning of - // a long-running background task. It is defined as an |unsigned int| - // instead of a |UIBackgroundTaskIdentifier| so this class can be used in - // .cc files. + // |beginBackgroundTaskWithName:expirationHandler:| when marking the + // beginning of a long-running background task. It is defined as an + // |unsigned int| instead of a |UIBackgroundTaskIdentifier| so this class + // can be used in .cc files. unsigned int background_task_id_; Lock background_task_id_lock_; diff --git a/chromium/base/ios/scoped_critical_action.mm b/chromium/base/ios/scoped_critical_action.mm index dbfbd4525bb..62b0ccc6e1c 100644 --- a/chromium/base/ios/scoped_critical_action.mm +++ b/chromium/base/ios/scoped_critical_action.mm @@ -6,16 +6,68 @@ #import <UIKit/UIKit.h> +#include <float.h> +#include "base/ios/ios_util.h" #include "base/logging.h" #include "base/memory/ref_counted.h" +#include "base/metrics/histogram_macros.h" +#include "base/metrics/user_metrics.h" +#include "base/strings/sys_string_conversions.h" #include "base/synchronization/lock.h" +#import "build/branding_buildflags.h" + +#if BUILDFLAG(CHROMIUM_BRANDING) +#include <dlfcn.h> +#endif // BUILDFLAG(CHROMIUM_BRANDING) + +namespace { + +// |backgroundTimeRemaining| is thread-safe, but as of Xcode 11.4 this method +// is still incorrectly marked as not thread-safe, so checking the value of +// |backgroundTimeRemaining| will trigger libMainThreadChecker (if enabled). +// Instead, for Chromium builds only, disable libMainThreadChecker for just +// this call. This logic is only useful for developer builds, and should not +// be included in official builds. These blocks should be removed if future +// versions of the library whitelists |backgroundTimeRemaining|. +NSTimeInterval GetBackgroundTimeRemaining(UIApplication* application) { +#if BUILDFLAG(CHROMIUM_BRANDING) + if (!base::ios::IsRunningOnIOS13OrLater()) { + // On developer iOS12 builds there's no way to suppress the main thread + // checker assert. Since it's a developer build, simply return 0. 
+ return 0; + } + static char const* const lib_main_thread_checker_bundle_path = +#if TARGET_IPHONE_SIMULATOR + "/usr/lib/libMainThreadChecker.dylib"; +#else + "/Developer/usr/lib/libMainThreadChecker.dylib"; +#endif + static void* handle = + dlopen(lib_main_thread_checker_bundle_path, RTLD_NOLOAD | RTLD_LAZY); + static void (*main_thread_checker_suppression_begin)() = + (void (*)())dlsym(handle, "__main_thread_checker_suppression_begin"); + if (main_thread_checker_suppression_begin) + main_thread_checker_suppression_begin(); +#endif // BUILDFLAG(CHROMIUM_BRANDING) + NSTimeInterval time = application.backgroundTimeRemaining; +#if BUILDFLAG(CHROMIUM_BRANDING) + static void (*main_thread_checker_suppression_end)() = + (void (*)())dlsym(handle, "__main_thread_checker_suppression_end"); + if (main_thread_checker_suppression_end) + main_thread_checker_suppression_end(); + dlclose(handle); +#endif // BUILDFLAG(CHROMIUM_BRANDING) + return time; +} + +} // namespace namespace base { namespace ios { -ScopedCriticalAction::ScopedCriticalAction() +ScopedCriticalAction::ScopedCriticalAction(StringPiece task_name) : core_(MakeRefCounted<ScopedCriticalAction::Core>()) { - ScopedCriticalAction::Core::StartBackgroundTask(core_); + ScopedCriticalAction::Core::StartBackgroundTask(core_, task_name); } ScopedCriticalAction::~ScopedCriticalAction() { @@ -29,30 +81,43 @@ ScopedCriticalAction::Core::~Core() { DCHECK_EQ(background_task_id_, UIBackgroundTaskInvalid); } -// This implementation calls |beginBackgroundTaskWithExpirationHandler:| when -// instantiated and |endBackgroundTask:| when destroyed, creating a scope whose -// execution will continue (temporarily) even after the app is backgrounded. +// This implementation calls |beginBackgroundTaskWithName:expirationHandler:| +// when instantiated and |endBackgroundTask:| when destroyed, creating a scope +// whose execution will continue (temporarily) even after the app is +// backgrounded. // static -void ScopedCriticalAction::Core::StartBackgroundTask(scoped_refptr<Core> core) { +void ScopedCriticalAction::Core::StartBackgroundTask(scoped_refptr<Core> core, + StringPiece task_name) { UIApplication* application = [UIApplication sharedApplication]; if (!application) { return; } - core->background_task_id_ = - [application beginBackgroundTaskWithExpirationHandler:^{ - DLOG(WARNING) << "Background task with id " << core->background_task_id_ - << " expired."; - // Note if |endBackgroundTask:| is not called for each task before time - // expires, the system kills the application. - EndBackgroundTask(core); - }]; + NSTimeInterval time = GetBackgroundTimeRemaining(application); + if (time != DBL_MAX && time > 0) { + UMA_HISTOGRAM_MEDIUM_TIMES("IOS.CriticalActionBackgroundTimeRemaining", + base::TimeDelta::FromSeconds(time)); + } + + NSString* task_string = + !task_name.empty() ? base::SysUTF8ToNSString(task_name) : nil; + core->background_task_id_ = [application + beginBackgroundTaskWithName:task_string + expirationHandler:^{ + DLOG(WARNING) + << "Background task with name <" + << base::SysNSStringToUTF8(task_string) << "> and with " + << "id " << core->background_task_id_ << " expired."; + // Note if |endBackgroundTask:| is not called for each task + // before time expires, the system kills the application. 
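
Taken together with the header change above, callers now label the work they are protecting, and that label is what the expiration handler and the OS report for leaked or expired tasks. A hedged sketch of a call site; SaveStateToDisk and the task name are made up for the example, only the constructor signature comes from the diff:

#include "base/ios/scoped_critical_action.h"

void PersistSessionState() {
  // Keep the app running (for the limited time iOS allows) while the write
  // completes, even if the user backgrounds the app mid-save.
  base::ios::ScopedCriticalAction keep_alive("SessionStateSave");
  SaveStateToDisk();  // hypothetical helper that does the actual I/O
  // The background task is ended automatically when |keep_alive| is
  // destroyed at the end of the scope.
}
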
+ EndBackgroundTask(core); + }]; if (core->background_task_id_ == UIBackgroundTaskInvalid) { - DLOG(WARNING) - << "beginBackgroundTaskWithExpirationHandler: returned an invalid ID"; + DLOG(WARNING) << "beginBackgroundTaskWithName:<" << task_name << "> " + << "expirationHandler: returned an invalid ID"; } else { - VLOG(3) << "Beginning background task with id " + VLOG(3) << "Beginning background task <" << task_name << "> with id " << core->background_task_id_; } } diff --git a/chromium/base/json/json_file_value_serializer.cc b/chromium/base/json/json_file_value_serializer.cc index 6ec275cf5ca..683259d718e 100644 --- a/chromium/base/json/json_file_value_serializer.cc +++ b/chromium/base/json/json_file_value_serializer.cc @@ -4,9 +4,10 @@ #include "base/json/json_file_value_serializer.h" +#include "base/check.h" #include "base/files/file_util.h" #include "base/json/json_string_value_serializer.h" -#include "base/logging.h" +#include "base/notreached.h" #include "build/build_config.h" using base::FilePath; diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc index 5d34887fd7e..e1e44c9781f 100644 --- a/chromium/base/json/json_parser.cc +++ b/chromium/base/json/json_parser.cc @@ -8,7 +8,7 @@ #include <utility> #include <vector> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/numerics/safe_conversions.h" #include "base/strings/string_number_conversions.h" @@ -63,8 +63,19 @@ JSONParser::~JSONParser() = default; Optional<Value> JSONParser::Parse(StringPiece input) { input_ = input; index_ = 0; + // Line and column counting is 1-based, but |index_| is 0-based. For example, + // if input is "Aaa\nB" then 'A' and 'B' are both in column 1 (at lines 1 and + // 2) and have indexes of 0 and 4. We track the line number explicitly (the + // |line_number_| field) and the column number implicitly (the difference + // between |index_| and |index_last_line_|). In calculating that difference, + // |index_last_line_| is the index of the '\r' or '\n', not the index of the + // first byte after the '\n'. For the 'B' in "Aaa\nB", its |index_| and + // |index_last_line_| would be 4 and 3: 'B' is in column (4 - 3) = 1. We + // initialize |index_last_line_| to -1, not 0, since -1 is the (out of range) + // index of the imaginary '\n' immediately before the start of the string: + // 'A' is in column (0 - -1) = 1. line_number_ = 1; - index_last_line_ = 0; + index_last_line_ = -1; error_code_ = JSONReader::JSON_NO_ERROR; error_line_ = 0; @@ -73,7 +84,7 @@ Optional<Value> JSONParser::Parse(StringPiece input) { // ICU and ReadUnicodeCharacter() use int32_t for lengths, so ensure // that the index_ will not overflow when parsing. if (!base::IsValueInRangeForNumericType<int32_t>(input.length())) { - ReportError(JSONReader::JSON_TOO_LARGE, 0); + ReportError(JSONReader::JSON_TOO_LARGE, -1); return nullopt; } @@ -89,7 +100,7 @@ Optional<Value> JSONParser::Parse(StringPiece input) { // Make sure the input stream is at an end. 
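
The long comment above ties the 1-based line/column numbers to the 0-based |index_| via column = index_ - index_last_line_, with index_last_line_ seeded to -1. A tiny standalone check of that arithmetic for the "Aaa\nB" example used in the comment, in plain C++ with no JSONParser involved:

#include <cassert>
#include <string>

int main() {
  const std::string input = "Aaa\nB";
  int index_last_line = -1;  // index of the imaginary '\n' before the input

  // 'A' is at index 0, before any real line break has been seen:
  // column = 0 - (-1) = 1 (line 1).
  assert(0 - index_last_line == 1);

  // The '\n' is at index 3 and becomes the new index_last_line.
  assert(input[3] == '\n');
  index_last_line = 3;

  // 'B' is at index 4, on the next line: column = 4 - 3 = 1 (line 2).
  assert(4 - index_last_line == 1);
  return 0;
}
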
if (GetNextToken() != T_END_OF_INPUT) { - ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1); + ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 0); return nullopt; } @@ -312,20 +323,20 @@ Optional<Value> JSONParser::ParseToken(Token token) { case T_NULL: return ConsumeLiteral(); default: - ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1); + ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0); return nullopt; } } Optional<Value> JSONParser::ConsumeDictionary() { if (ConsumeChar() != '{') { - ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1); + ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0); return nullopt; } StackMarker depth_check(max_depth_, &stack_depth_); if (depth_check.IsTooDeep()) { - ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 0); + ReportError(JSONReader::JSON_TOO_MUCH_NESTING, -1); return nullopt; } @@ -334,7 +345,7 @@ Optional<Value> JSONParser::ConsumeDictionary() { Token token = GetNextToken(); while (token != T_OBJECT_END) { if (token != T_STRING) { - ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 1); + ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 0); return nullopt; } @@ -347,7 +358,7 @@ Optional<Value> JSONParser::ConsumeDictionary() { // Read the separator. token = GetNextToken(); if (token != T_OBJECT_PAIR_SEPARATOR) { - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } @@ -367,7 +378,7 @@ Optional<Value> JSONParser::ConsumeDictionary() { ConsumeChar(); token = GetNextToken(); if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) { - ReportError(JSONReader::JSON_TRAILING_COMMA, 1); + ReportError(JSONReader::JSON_TRAILING_COMMA, 0); return nullopt; } } else if (token != T_OBJECT_END) { @@ -385,13 +396,13 @@ Optional<Value> JSONParser::ConsumeDictionary() { Optional<Value> JSONParser::ConsumeList() { if (ConsumeChar() != '[') { - ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1); + ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0); return nullopt; } StackMarker depth_check(max_depth_, &stack_depth_); if (depth_check.IsTooDeep()) { - ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 0); + ReportError(JSONReader::JSON_TOO_MUCH_NESTING, -1); return nullopt; } @@ -412,11 +423,11 @@ Optional<Value> JSONParser::ConsumeList() { ConsumeChar(); token = GetNextToken(); if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) { - ReportError(JSONReader::JSON_TRAILING_COMMA, 1); + ReportError(JSONReader::JSON_TRAILING_COMMA, 0); return nullopt; } } else if (token != T_ARRAY_END) { - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } } @@ -435,7 +446,7 @@ Optional<Value> JSONParser::ConsumeString() { bool JSONParser::ConsumeStringRaw(StringBuilder* out) { if (ConsumeChar() != '"') { - ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1); + ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0); return false; } @@ -451,7 +462,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) { &next_char) || !IsValidCodepoint(next_char)) { if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) { - ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1); + ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 0); return false; } ConsumeChar(); @@ -465,7 +476,19 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) { return true; } if (next_char != '\\') { - // If this character is not an escape sequence... 
+ // If this character is not an escape sequence, track any line breaks and + // copy next_char to the StringBuilder. The JSON spec forbids unescaped + // ASCII control characters within a string, including '\r' and '\n', but + // this implementation is more lenient. + if ((next_char == '\r') || (next_char == '\n')) { + index_last_line_ = index_; + // Don't increment line_number_ twice for "\r\n". We are guaranteed + // that (index_ > 0) because we are consuming a string, so we must have + // seen an opening '"' quote character. + if ((next_char == '\r') || (input_[index_ - 1] != '\r')) { + ++line_number_; + } + } ConsumeChar(); string.Append(next_char); } else { @@ -478,7 +501,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) { // Read past the escape '\' and ensure there's a character following. Optional<StringPiece> escape_sequence = ConsumeChars(2); if (!escape_sequence) { - ReportError(JSONReader::JSON_INVALID_ESCAPE, 0); + ReportError(JSONReader::JSON_INVALID_ESCAPE, -1); return false; } @@ -489,14 +512,14 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) { // are supported here for backwards-compatiblity with the old parser. escape_sequence = ConsumeChars(2); if (!escape_sequence) { - ReportError(JSONReader::JSON_INVALID_ESCAPE, -2); + ReportError(JSONReader::JSON_INVALID_ESCAPE, -3); return false; } int hex_digit = 0; if (!UnprefixedHexStringToInt(*escape_sequence, &hex_digit) || !IsValidCharacter(hex_digit)) { - ReportError(JSONReader::JSON_INVALID_ESCAPE, -2); + ReportError(JSONReader::JSON_INVALID_ESCAPE, -3); return false; } @@ -507,7 +530,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) { // UTF units are of the form \uXXXX. uint32_t code_point; if (!DecodeUTF16(&code_point)) { - ReportError(JSONReader::JSON_INVALID_ESCAPE, 0); + ReportError(JSONReader::JSON_INVALID_ESCAPE, -1); return false; } string.Append(code_point); @@ -542,13 +565,13 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) { break; // All other escape squences are illegal. 
default: - ReportError(JSONReader::JSON_INVALID_ESCAPE, 0); + ReportError(JSONReader::JSON_INVALID_ESCAPE, -1); return false; } } } - ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); + ReportError(JSONReader::JSON_SYNTAX_ERROR, -1); return false; } @@ -618,7 +641,7 @@ Optional<Value> JSONParser::ConsumeNumber() { ConsumeChar(); if (!ReadInt(false)) { - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } end_index = index_; @@ -627,7 +650,7 @@ Optional<Value> JSONParser::ConsumeNumber() { if (PeekChar() == '.') { ConsumeChar(); if (!ReadInt(true)) { - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } end_index = index_; @@ -641,7 +664,7 @@ Optional<Value> JSONParser::ConsumeNumber() { ConsumeChar(); } if (!ReadInt(true)) { - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } end_index = index_; @@ -660,7 +683,7 @@ Optional<Value> JSONParser::ConsumeNumber() { case T_END_OF_INPUT: break; default: - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } @@ -678,7 +701,7 @@ Optional<Value> JSONParser::ConsumeNumber() { return Value(num_double); } - ReportError(JSONReader::JSON_UNREPRESENTABLE_NUMBER, 1); + ReportError(JSONReader::JSON_UNREPRESENTABLE_NUMBER, 0); return nullopt; } @@ -713,7 +736,7 @@ Optional<Value> JSONParser::ConsumeLiteral() { return Value(false); if (ConsumeIfMatch("null")) return Value(Value::Type::NONE); - ReportError(JSONReader::JSON_SYNTAX_ERROR, 1); + ReportError(JSONReader::JSON_SYNTAX_ERROR, 0); return nullopt; } @@ -730,6 +753,12 @@ void JSONParser::ReportError(JSONReader::JsonParseError code, error_code_ = code; error_line_ = line_number_; error_column_ = index_ - index_last_line_ + column_adjust; + + // For a final blank line ('\n' and then EOF), a negative column_adjust may + // put us below 1, which doesn't really make sense for 1-based columns. + if (error_column_ < 1) { + error_column_ = 1; + } } // static diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h index 548fb282700..523062e6812 100644 --- a/chromium/base/json/json_parser.h +++ b/chromium/base/json/json_parser.h @@ -246,8 +246,6 @@ class BASE_EXPORT JSONParser { FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals); FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers); FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages); - FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidCharacters); - FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidUTF16EscapeSequence); DISALLOW_COPY_AND_ASSIGN(JSONParser); }; diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc index 6ce5bab6519..c4926423433 100644 --- a/chromium/base/json/json_parser_unittest.cc +++ b/chromium/base/json/json_parser_unittest.cc @@ -29,17 +29,6 @@ class JSONParserTest : public testing::Test { return parser; } - // MSan will do a better job detecting over-read errors if the input is - // not nul-terminated on the heap. This will copy |input| to a new buffer - // owned by |owner|, returning a StringPiece to |owner|. 
- StringPiece MakeNotNullTerminatedInput(const char* input, - std::unique_ptr<char[]>* owner) { - size_t str_len = strlen(input); - owner->reset(new char[str_len]); - memcpy(owner->get(), input, str_len); - return StringPiece(owner->get(), str_len); - } - void TestLastThree(JSONParser* parser) { EXPECT_EQ(',', *parser->PeekChar()); parser->ConsumeChar(); @@ -226,25 +215,6 @@ TEST_F(JSONParserTest, ErrorMessages) { EXPECT_TRUE(root.error_message.empty()); EXPECT_EQ(0, root.error_code); - // Test line and column counting - const char big_json[] = "[\n0,\n1,\n2,\n3,4,5,6 7,\n8,\n9\n]"; - // error here ----------------------------------^ - root = JSONReader::ReadAndReturnValueWithError(big_json, JSON_PARSE_RFC); - EXPECT_FALSE(root.value); - EXPECT_EQ(JSONParser::FormatErrorMessage(5, 10, JSONReader::kSyntaxError), - root.error_message); - EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, root.error_code); - - // Test line and column counting with "\r\n" line ending - const char big_json_crlf[] = - "[\r\n0,\r\n1,\r\n2,\r\n3,4,5,6 7,\r\n8,\r\n9\r\n]"; - // error here ----------------------^ - root = JSONReader::ReadAndReturnValueWithError(big_json_crlf, JSON_PARSE_RFC); - EXPECT_FALSE(root.value); - EXPECT_EQ(JSONParser::FormatErrorMessage(5, 10, JSONReader::kSyntaxError), - root.error_message); - EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, root.error_code); - // Test each of the error conditions root = JSONReader::ReadAndReturnValueWithError("{},{}", JSON_PARSE_RFC); EXPECT_FALSE(root.value); @@ -312,160 +282,5 @@ TEST_F(JSONParserTest, ErrorMessages) { EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, root.error_code); } -TEST_F(JSONParserTest, Decode4ByteUtf8Char) { - // This test strings contains a 4 byte unicode character (a smiley!) that the - // reader should be able to handle (the character is \xf0\x9f\x98\x87). - const char kUtf8Data[] = - "[\"😇\",[],[],[],{\"google:suggesttype\":[]}]"; - JSONReader::ValueWithError root = - JSONReader::ReadAndReturnValueWithError(kUtf8Data, JSON_PARSE_RFC); - EXPECT_TRUE(root.value) << root.error_message; -} - -TEST_F(JSONParserTest, DecodeUnicodeNonCharacter) { - // Tests Unicode code points (encoded as escaped UTF-16) that are not valid - // characters. 
- EXPECT_TRUE(JSONReader::Read("[\"\\uFDD0\"]")); // U+FDD0 - EXPECT_TRUE(JSONReader::Read("[\"\\uFDDF\"]")); // U+FDDF - EXPECT_TRUE(JSONReader::Read("[\"\\uFDEF\"]")); // U+FDEF - EXPECT_TRUE(JSONReader::Read("[\"\\uFFFE\"]")); // U+FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uFFFF\"]")); // U+FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD83F\\uDFFE\"]")); // U+01FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD83F\\uDFFF\"]")); // U+01FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD87F\\uDFFE\"]")); // U+02FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD87F\\uDFFF\"]")); // U+02FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD8BF\\uDFFE\"]")); // U+03FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD8BF\\uDFFF\"]")); // U+03FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD8FF\\uDFFE\"]")); // U+04FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD8FF\\uDFFF\"]")); // U+04FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD93F\\uDFFE\"]")); // U+05FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD93F\\uDFFF\"]")); // U+05FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD97F\\uDFFE\"]")); // U+06FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD97F\\uDFFF\"]")); // U+06FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD9BF\\uDFFE\"]")); // U+07FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD9BF\\uDFFF\"]")); // U+07FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uD9FF\\uDFFE\"]")); // U+08FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uD9FF\\uDFFF\"]")); // U+08FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDA3F\\uDFFE\"]")); // U+09FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDA3F\\uDFFF\"]")); // U+09FFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDA7F\\uDFFE\"]")); // U+0AFFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDA7F\\uDFFF\"]")); // U+0AFFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDABF\\uDFFE\"]")); // U+0BFFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDABF\\uDFFF\"]")); // U+0BFFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDAFF\\uDFFE\"]")); // U+0CFFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDAFF\\uDFFF\"]")); // U+0CFFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDB3F\\uDFFE\"]")); // U+0DFFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDB3F\\uDFFF\"]")); // U+0DFFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDB7F\\uDFFE\"]")); // U+0EFFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDB7F\\uDFFF\"]")); // U+0EFFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDBBF\\uDFFE\"]")); // U+0FFFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDBBF\\uDFFF\"]")); // U+0FFFFF - EXPECT_TRUE(JSONReader::Read("[\"\\uDBFF\\uDFFE\"]")); // U+10FFFE - EXPECT_TRUE(JSONReader::Read("[\"\\uDBFF\\uDFFF\"]")); // U+10FFFF -} - -TEST_F(JSONParserTest, DecodeNegativeEscapeSequence) { - EXPECT_FALSE(JSONReader::Read("[\"\\x-A\"]")); - EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]")); -} - -// Verifies invalid code points are replaced. -TEST_F(JSONParserTest, ReplaceInvalidCharacters) { - // U+D800 is a lone surrogate. - const std::string invalid = "\"\xED\xA0\x80\""; - std::unique_ptr<JSONParser> parser( - NewTestParser(invalid, JSON_REPLACE_INVALID_CHARACTERS)); - Optional<Value> value(parser->ConsumeString()); - ASSERT_TRUE(value); - std::string str; - EXPECT_TRUE(value->GetAsString(&str)); - // Expect three U+FFFD (one for each UTF-8 byte in the invalid code - // point). - EXPECT_EQ("\xEF\xBF\xBD\xEF\xBF\xBD\xEF\xBF\xBD", str); -} - -TEST_F(JSONParserTest, ReplaceInvalidUTF16EscapeSequence) { - // U+D800 is a lone surrogate. 
- const std::string invalid = "\"_\\uD800_\""; - std::unique_ptr<JSONParser> parser( - NewTestParser(invalid, JSON_REPLACE_INVALID_CHARACTERS)); - Optional<Value> value(parser->ConsumeString()); - ASSERT_TRUE(value); - std::string str; - EXPECT_TRUE(value->GetAsString(&str)); - EXPECT_EQ("_\xEF\xBF\xBD_", str); -} - -TEST_F(JSONParserTest, ParseNumberErrors) { - const struct { - const char* input; - bool parse_success; - double value; - } kCases[] = { - // clang-format off - {"1", true, 1}, - {"2.", false, 0}, - {"42", true, 42}, - {"6e", false, 0}, - {"43e2", true, 4300}, - {"43e-", false, 0}, - {"9e-3", true, 0.009}, - {"2e+", false, 0}, - {"2e+2", true, 200}, - // clang-format on - }; - - for (unsigned int i = 0; i < base::size(kCases); ++i) { - auto test_case = kCases[i]; - SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input)); - - std::unique_ptr<char[]> input_owner; - StringPiece input = - MakeNotNullTerminatedInput(test_case.input, &input_owner); - - Optional<Value> result = JSONReader::Read(input); - EXPECT_EQ(test_case.parse_success, result.has_value()); - - if (!result) - continue; - - ASSERT_TRUE(result->is_double() || result->is_int()); - EXPECT_EQ(test_case.value, result->GetDouble()); - } -} - -TEST_F(JSONParserTest, UnterminatedInputs) { - const char* const kCases[] = { - // clang-format off - "/", - "//", - "/*", - "\"xxxxxx", - "\"", - "{ ", - "[\t", - "tru", - "fals", - "nul", - "\"\\x", - "\"\\x2", - "\"\\u123", - "\"\\uD803\\u", - "\"\\", - "\"\\/", - // clang-format on - }; - - for (unsigned int i = 0; i < base::size(kCases); ++i) { - auto* test_case = kCases[i]; - SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case)); - - std::unique_ptr<char[]> input_owner; - StringPiece input = MakeNotNullTerminatedInput(test_case, &input_owner); - - EXPECT_FALSE(JSONReader::Read(input)); - } -} - } // namespace internal } // namespace base diff --git a/chromium/base/json/json_perftest_decodebench.cc b/chromium/base/json/json_perftest_decodebench.cc new file mode 100644 index 00000000000..e0f265dde5f --- /dev/null +++ b/chromium/base/json/json_perftest_decodebench.cc @@ -0,0 +1,98 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This program measures the time taken to decode the given JSON files (the +// command line arguments). It is for manual benchmarking. +// +// Usage: +// $ ninja -C out/foobar json_perftest_decodebench +// $ out/foobar/json_perftest_decodebench -a -n=10 the/path/to/your/*.json +// +// The -n=10 switch controls the number of iterations. It defaults to 1. +// +// The -a switch means to print 1 non-comment line per input file (the average +// iteration time). Without this switch (the default), it prints n non-comment +// lines per input file (individual iteration times). 
For a single input file, +// building and running this program before and after a particular commit can +// work well with the 'ministat' tool: https://github.com/thorduri/ministat + +#include <inttypes.h> +#include <iomanip> +#include <iostream> + +#include "base/command_line.h" +#include "base/files/file_util.h" +#include "base/json/json_reader.h" +#include "base/time/time.h" + +int main(int argc, char* argv[]) { + if (!base::ThreadTicks::IsSupported()) { + std::cout << "# base::ThreadTicks is not supported\n"; + return EXIT_FAILURE; + } + base::ThreadTicks::WaitUntilInitialized(); + + base::CommandLine::Init(argc, argv); + base::CommandLine* command_line = base::CommandLine::ForCurrentProcess(); + bool average = command_line->HasSwitch("a"); + int iterations = 1; + std::string iterations_str = command_line->GetSwitchValueASCII("n"); + if (!iterations_str.empty()) { + iterations = atoi(iterations_str.c_str()); + if (iterations < 1) { + std::cout << "# invalid -n command line switch\n"; + return EXIT_FAILURE; + } + } + + if (average) { + std::cout << "# Microseconds (μs), n=" << iterations << ", averaged" + << std::endl; + } else { + std::cout << "# Microseconds (μs), n=" << iterations << std::endl; + } + for (const auto& filename : command_line->GetArgs()) { + std::string src; + if (!base::ReadFileToString(base::FilePath(filename), &src)) { + std::cout << "# could not read " << filename << std::endl; + return EXIT_FAILURE; + } + + int64_t total_time = 0; + std::string error_message; + for (int i = 0; i < iterations; ++i) { + auto start = base::ThreadTicks::Now(); + auto v = base::JSONReader::ReadAndReturnValueWithError(src); + auto end = base::ThreadTicks::Now(); + int64_t iteration_time = (end - start).InMicroseconds(); + total_time += iteration_time; + + if (i == 0) { + if (average) { + error_message = std::move(v.error_message); + } else { + std::cout << "# " << filename; + if (!v.error_message.empty()) { + std::cout << ": " << v.error_message; + } + std::cout << std::endl; + } + } + + if (!average) { + std::cout << iteration_time << std::endl; + } + } + + if (average) { + int64_t average_time = total_time / iterations; + std::cout << std::setw(12) << average_time << "\t# " << filename; + if (!error_message.empty()) { + std::cout << ": " << error_message; + } + std::cout << std::endl; + } + } + return EXIT_SUCCESS; +} diff --git a/chromium/base/json/json_reader.cc b/chromium/base/json/json_reader.cc index 0956bf50fb9..53e3df9fdf3 100644 --- a/chromium/base/json/json_reader.cc +++ b/chromium/base/json/json_reader.cc @@ -8,7 +8,7 @@ #include <vector> #include "base/json/json_parser.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/optional.h" namespace base { @@ -17,24 +17,18 @@ namespace base { static_assert(JSONReader::JSON_PARSE_ERROR_COUNT < 1000, "JSONReader error out of bounds"); -const char JSONReader::kInvalidEscape[] = - "Invalid escape sequence."; -const char JSONReader::kSyntaxError[] = - "Syntax error."; -const char JSONReader::kUnexpectedToken[] = - "Unexpected token."; -const char JSONReader::kTrailingComma[] = - "Trailing comma not allowed."; -const char JSONReader::kTooMuchNesting[] = - "Too much nesting."; +const char JSONReader::kInvalidEscape[] = "Invalid escape sequence."; +const char JSONReader::kSyntaxError[] = "Syntax error."; +const char JSONReader::kUnexpectedToken[] = "Unexpected token."; +const char JSONReader::kTrailingComma[] = "Trailing comma not allowed."; +const char JSONReader::kTooMuchNesting[] = "Too much nesting."; const char 
JSONReader::kUnexpectedDataAfterRoot[] = "Unexpected data after root element."; const char JSONReader::kUnsupportedEncoding[] = "Unsupported encoding. JSON must be UTF-8."; const char JSONReader::kUnquotedDictionaryKey[] = "Dictionary keys must be quoted."; -const char JSONReader::kInputTooLarge[] = - "Input string is too large (>2GB)."; +const char JSONReader::kInputTooLarge[] = "Input string is too large (>2GB)."; const char JSONReader::kUnrepresentableNumber[] = "Number cannot be represented."; @@ -147,10 +141,6 @@ std::unique_ptr<Value> JSONReader::ReadToValueDeprecated(StringPiece json) { return value ? std::make_unique<Value>(std::move(*value)) : nullptr; } -JSONReader::JsonParseError JSONReader::error_code() const { - return parser_->error_code(); -} - std::string JSONReader::GetErrorMessage() const { return parser_->GetErrorMessage(); } diff --git a/chromium/base/json/json_reader.h b/chromium/base/json/json_reader.h index 730d6e5505d..511fffe32e7 100644 --- a/chromium/base/json/json_reader.h +++ b/chromium/base/json/json_reader.h @@ -1,29 +1,39 @@ // Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. + +// A JSON parser, converting from a base::StringPiece to a base::Value. +// +// The JSON spec is: +// https://tools.ietf.org/rfc/rfc8259.txt +// which obsoletes the earlier RFCs 4627, 7158 and 7159. // -// A JSON parser. Converts strings of JSON into a Value object (see -// base/values.h). -// http://www.ietf.org/rfc/rfc4627.txt?number=4627 +// This RFC should be equivalent to the informal spec: +// https://www.json.org/json-en.html // -// Known limitations/deviations from the RFC: -// - Only knows how to parse ints within the range of a signed 32 bit int and -// decimal numbers within a double. -// - Assumes input is encoded as UTF8. The spec says we should allow UTF-16 -// (BE or LE) and UTF-32 (BE or LE) as well. -// - We limit nesting to 100 levels to prevent stack overflow (this is allowed -// by the RFC). -// - A Unicode FAQ ("http://unicode.org/faq/utf_bom.html") writes a data -// stream may start with a Unicode Byte-Order-Mark (U+FEFF), i.e. the input -// UTF-8 string for the JSONReader::JsonToValue() function may start with a -// UTF-8 BOM (0xEF, 0xBB, 0xBF). -// To avoid the function from mis-treating a UTF-8 BOM as an invalid -// character, the function skips a Unicode BOM at the beginning of the -// Unicode string (converted from the input UTF-8 string) before parsing it. +// Implementation choices permitted by the RFC: +// - Nesting is limited (to a configurable depth, 200 by default). +// - Numbers are limited to those representable by a finite double. The +// conversion from a JSON number (in the base::StringPiece input) to a +// double-flavored base::Value may also be lossy. +// - The input (which must be UTF-8) may begin with a BOM (Byte Order Mark). +// - Duplicate object keys (strings) are silently allowed. Last key-value pair +// wins. Previous pairs are discarded. // -// TODO(tc): Add a parsing option to to relax object keys being wrapped in -// double quotes -// TODO(tc): Add an option to disable comment stripping +// Configurable (see the JSONParserOptions type) deviations from the RFC: +// - Allow trailing commas: "[1,2,]". +// - Replace invalid Unicode with U+FFFD REPLACEMENT CHARACTER. +// +// Non-configurable deviations from the RFC: +// - Allow "// etc\n" and "/* etc */" C-style comments. 
+// - Allow ASCII control characters, including literal (not escaped) NUL bytes +// and new lines, within a JSON string. +// - Allow "\\v" escapes within a JSON string, producing a vertical tab. +// - Allow "\\x23" escapes within a JSON string. Subtly, the 2-digit hex value +// is a Unicode code point, not a UTF-8 byte. For example, "\\xFF" in the +// JSON source decodes to a base::Value whose string contains "\xC3\xBF", the +// UTF-8 encoding of U+00FF LATIN SMALL LETTER Y WITH DIAERESIS. Converting +// from UTF-8 to UTF-16, e.g. via UTF8ToWide, will recover a 16-bit 0x00FF. #ifndef BASE_JSON_JSON_READER_H_ #define BASE_JSON_JSON_READER_H_ @@ -44,7 +54,7 @@ class JSONParser; } enum JSONParserOptions { - // Parses the input strictly according to RFC 4627, except for where noted + // Parses the input strictly according to RFC 8259, except for where noted // above. JSON_PARSE_RFC = 0, @@ -159,10 +169,6 @@ class BASE_EXPORT JSONReader { // Non-static version of Read() above. std::unique_ptr<Value> ReadToValueDeprecated(StringPiece json); - // Returns the error code if the last call to ReadToValue() failed. - // Returns JSON_NO_ERROR otherwise. - JsonParseError error_code() const; - // Converts error_code_ to a human-readable string, including line and column // numbers if appropriate. std::string GetErrorMessage() const; diff --git a/chromium/base/json/json_reader_unittest.cc b/chromium/base/json/json_reader_unittest.cc index 784600a29e7..33570ad4b48 100644 --- a/chromium/base/json/json_reader_unittest.cc +++ b/chromium/base/json/json_reader_unittest.cc @@ -14,11 +14,27 @@ #include "base/path_service.h" #include "base/stl_util.h" #include "base/strings/string_piece.h" +#include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "base/values.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" +namespace { + +// MSan will do a better job detecting over-read errors if the input is not +// nul-terminated on the heap. This will copy |input| to a new buffer owned by +// |owner|, returning a base::StringPiece to |owner|. +base::StringPiece MakeNotNullTerminatedInput(const char* input, + std::unique_ptr<char[]>* owner) { + size_t str_len = strlen(input); + owner->reset(new char[str_len]); + memcpy(owner->get(), input, str_len); + return base::StringPiece(owner->get(), str_len); +} + +} // namespace + namespace base { TEST(JSONReaderTest, Whitespace) { @@ -91,7 +107,7 @@ TEST(JSONReaderTest, Ints) { } TEST(JSONReaderTest, NonDecimalNumbers) { - // According to RFC4627, oct, hex, and leading zeros are invalid JSON. + // According to RFC 8259, oct, hex, and leading zeros are invalid JSON. EXPECT_FALSE(JSONReader::Read("043")); EXPECT_FALSE(JSONReader::Read("0x43")); EXPECT_FALSE(JSONReader::Read("00")); @@ -113,61 +129,61 @@ TEST(JSONReaderTest, LargeIntPromotion) { // storage as doubles Optional<Value> root = JSONReader::Read("2147483648"); ASSERT_TRUE(root); - double double_val; EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(2147483648.0, double_val); + EXPECT_DOUBLE_EQ(2147483648.0, root->GetDouble()); root = JSONReader::Read("-2147483649"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(-2147483649.0, double_val); + EXPECT_DOUBLE_EQ(-2147483649.0, root->GetDouble()); +} + +TEST(JSONReaderTest, LargerIntIsLossy) { + // Parse LONG_MAX as a JSON number (not a JSON string). 
The result of the + // parse is a base::Value, either a (32-bit) int or a (64-bit) double. + // LONG_MAX would overflow an int and can only be approximated by a double. + // In this case, parsing is lossy. + const char* etc807 = "9223372036854775807"; + const char* etc808 = "9223372036854775808.000000"; + Optional<Value> root = JSONReader::Read(etc807); + ASSERT_TRUE(root); + ASSERT_FALSE(root->is_int()); + ASSERT_TRUE(root->is_double()); + // We use StringPrintf instead of NumberToString, because the NumberToString + // function does not let you specify the precision, and its default output, + // "9.223372036854776e+18", isn't precise enough to see the lossiness. + EXPECT_EQ(std::string(etc808), StringPrintf("%f", root->GetDouble())); } TEST(JSONReaderTest, Doubles) { Optional<Value> root = JSONReader::Read("43.1"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(43.1, double_val); + EXPECT_DOUBLE_EQ(43.1, root->GetDouble()); root = JSONReader::Read("4.3e-1"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(.43, double_val); + EXPECT_DOUBLE_EQ(.43, root->GetDouble()); root = JSONReader::Read("2.1e0"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(2.1, double_val); + EXPECT_DOUBLE_EQ(2.1, root->GetDouble()); root = JSONReader::Read("2.1e+0001"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(21.0, double_val); + EXPECT_DOUBLE_EQ(21.0, root->GetDouble()); root = JSONReader::Read("0.01"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(0.01, double_val); + EXPECT_DOUBLE_EQ(0.01, root->GetDouble()); root = JSONReader::Read("1.00"); ASSERT_TRUE(root); EXPECT_TRUE(root->is_double()); - double_val = 0.0; - EXPECT_TRUE(root->GetAsDouble(&double_val)); - EXPECT_DOUBLE_EQ(1.0, double_val); + EXPECT_DOUBLE_EQ(1.0, root->GetDouble()); // This is syntaxtically valid, but out of range of a double. auto value_with_error = @@ -191,7 +207,8 @@ TEST(JSONReaderTest, ExponentialNumbers) { EXPECT_FALSE(JSONReader::Read("1e1.0")); } -TEST(JSONReaderTest, InvalidNAN) { +TEST(JSONReaderTest, InvalidInfNAN) { + // The largest finite double is roughly 1.8e308. EXPECT_FALSE(JSONReader::Read("1e1000")); EXPECT_FALSE(JSONReader::Read("-1e1000")); EXPECT_FALSE(JSONReader::Read("NaN")); @@ -200,6 +217,8 @@ TEST(JSONReaderTest, InvalidNAN) { } TEST(JSONReaderTest, InvalidNumbers) { + EXPECT_TRUE(JSONReader::Read("4.3")); + EXPECT_FALSE(JSONReader::Read("4.")); EXPECT_FALSE(JSONReader::Read("4.3.1")); EXPECT_FALSE(JSONReader::Read("4e3.1")); EXPECT_FALSE(JSONReader::Read("4.a")); @@ -234,12 +253,12 @@ TEST(JSONReaderTest, BasicStringEscapes) { TEST(JSONReaderTest, UnicodeEscapes) { // Test hex and unicode escapes including the null character. 
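
The LargerIntIsLossy test above depends on a property of IEEE doubles rather than of the parser: 9223372036854775807 (INT64_MAX) has no exact double representation, and the nearest double is 2^63. A quick standalone demonstration of that rounding, separate from JSONReader:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t big = INT64_MAX;  // 9223372036854775807
  // A double carries a 53-bit significand, so integers this large are rounded
  // to the nearest representable value, which here is exactly 2^63.
  const double approx = static_cast<double>(big);
  std::printf("%f\n", approx);  // prints 9223372036854775808.000000
  return 0;
}
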
- Optional<Value> root = JSONReader::Read("\"\\x41\\x00\\u1234\\u0000\""); + Optional<Value> root = JSONReader::Read("\"\\x41\\xFF\\x00\\u1234\\u0000\""); ASSERT_TRUE(root); EXPECT_TRUE(root->is_string()); std::string str_val; EXPECT_TRUE(root->GetAsString(&str_val)); - EXPECT_EQ(std::wstring(L"A\0\x1234\0", 4), UTF8ToWide(str_val)); + EXPECT_EQ(std::wstring(L"A\x00FF\0\x1234\0", 5), UTF8ToWide(str_val)); // The contents of a Unicode escape may only be four hex chars. Previously the // parser accepted things like "0x01" and "0X01". @@ -435,6 +454,16 @@ TEST(JSONReaderTest, DictionaryKeysWithPeriods) { EXPECT_EQ(1, *integer_value); } +TEST(JSONReaderTest, DuplicateKeys) { + Optional<Value> dict_val = JSONReader::Read("{\"x\":1,\"x\":2,\"y\":3}"); + ASSERT_TRUE(dict_val); + ASSERT_TRUE(dict_val->is_dict()); + + auto integer_value = dict_val->FindIntKey("x"); + ASSERT_TRUE(integer_value); + EXPECT_EQ(2, *integer_value); +} + TEST(JSONReaderTest, InvalidDictionaries) { // No closing brace. EXPECT_FALSE(JSONReader::Read("{\"a\": true")); @@ -710,17 +739,29 @@ TEST(JSONReaderTest, InvalidSanity) { JSONReader reader; LOG(INFO) << "Sanity test " << i << ": <" << kInvalidJson[i] << ">"; EXPECT_FALSE(reader.ReadToValue(kInvalidJson[i])); - EXPECT_NE(JSONReader::JSON_NO_ERROR, reader.error_code()); EXPECT_NE("", reader.GetErrorMessage()); } } TEST(JSONReaderTest, IllegalTrailingNull) { - const char json[] = { '"', 'n', 'u', 'l', 'l', '"', '\0' }; + const char json[] = {'"', 'n', 'u', 'l', 'l', '"', '\0'}; std::string json_string(json, sizeof(json)); JSONReader reader; EXPECT_FALSE(reader.ReadToValue(json_string)); - EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, reader.error_code()); + EXPECT_NE("", reader.GetErrorMessage()); +} + +TEST(JSONReaderTest, ASCIIControlCodes) { + // A literal NUL byte or a literal new line, in a JSON string, should be + // rejected. RFC 8259 section 7 says "the characters that MUST be escaped + // [include]... the control characters (U+0000 through U+001F)". + // + // Nonetheless, we accept them, for backwards compatibility. + const char json[] = {'"', 'a', '\0', 'b', '\n', 'c', '"'}; + Optional<Value> root = JSONReader::Read(std::string(json, sizeof(json))); + ASSERT_TRUE(root); + ASSERT_TRUE(root->is_string()); + EXPECT_EQ(5u, root->GetString().length()); } TEST(JSONReaderTest, MaxNesting) { @@ -729,4 +770,231 @@ TEST(JSONReaderTest, MaxNesting) { EXPECT_TRUE(JSONReader::Read(json, JSON_PARSE_RFC, 4)); } +TEST(JSONReaderTest, Decode4ByteUtf8Char) { + // kUtf8Data contains a 4 byte unicode character (a smiley!) that JSONReader + // should be able to handle. The UTF-8 encoding of U+1F607 SMILING FACE WITH + // HALO is "\xF0\x9F\x98\x87". + const char kUtf8Data[] = "[\"😇\",[],[],[],{\"google:suggesttype\":[]}]"; + Optional<Value> root = JSONReader::Read(kUtf8Data, JSON_PARSE_RFC); + ASSERT_TRUE(root); + ASSERT_TRUE(root->is_list()); + Value::ListView lv = root->GetList(); + ASSERT_EQ(5u, lv.size()); + ASSERT_TRUE(lv[0].is_string()); + EXPECT_EQ("\xF0\x9F\x98\x87", lv[0].GetString()); +} + +TEST(JSONReaderTest, DecodeUnicodeNonCharacter) { + // Tests Unicode code points (encoded as escaped UTF-16) that are not valid + // characters. 
+ EXPECT_TRUE(JSONReader::Read("[\"\\uFDD0\"]")); // U+FDD0 + EXPECT_TRUE(JSONReader::Read("[\"\\uFDDF\"]")); // U+FDDF + EXPECT_TRUE(JSONReader::Read("[\"\\uFDEF\"]")); // U+FDEF + EXPECT_TRUE(JSONReader::Read("[\"\\uFFFE\"]")); // U+FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uFFFF\"]")); // U+FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD83F\\uDFFE\"]")); // U+01FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD83F\\uDFFF\"]")); // U+01FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD87F\\uDFFE\"]")); // U+02FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD87F\\uDFFF\"]")); // U+02FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD8BF\\uDFFE\"]")); // U+03FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD8BF\\uDFFF\"]")); // U+03FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD8FF\\uDFFE\"]")); // U+04FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD8FF\\uDFFF\"]")); // U+04FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD93F\\uDFFE\"]")); // U+05FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD93F\\uDFFF\"]")); // U+05FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD97F\\uDFFE\"]")); // U+06FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD97F\\uDFFF\"]")); // U+06FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD9BF\\uDFFE\"]")); // U+07FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD9BF\\uDFFF\"]")); // U+07FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uD9FF\\uDFFE\"]")); // U+08FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uD9FF\\uDFFF\"]")); // U+08FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDA3F\\uDFFE\"]")); // U+09FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDA3F\\uDFFF\"]")); // U+09FFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDA7F\\uDFFE\"]")); // U+0AFFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDA7F\\uDFFF\"]")); // U+0AFFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDABF\\uDFFE\"]")); // U+0BFFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDABF\\uDFFF\"]")); // U+0BFFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDAFF\\uDFFE\"]")); // U+0CFFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDAFF\\uDFFF\"]")); // U+0CFFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDB3F\\uDFFE\"]")); // U+0DFFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDB3F\\uDFFF\"]")); // U+0DFFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDB7F\\uDFFE\"]")); // U+0EFFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDB7F\\uDFFF\"]")); // U+0EFFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDBBF\\uDFFE\"]")); // U+0FFFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDBBF\\uDFFF\"]")); // U+0FFFFF + EXPECT_TRUE(JSONReader::Read("[\"\\uDBFF\\uDFFE\"]")); // U+10FFFE + EXPECT_TRUE(JSONReader::Read("[\"\\uDBFF\\uDFFF\"]")); // U+10FFFF +} + +TEST(JSONReaderTest, DecodeNegativeEscapeSequence) { + EXPECT_FALSE(JSONReader::Read("[\"\\x-A\"]")); + EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]")); +} + +// Verifies invalid code points are replaced. +TEST(JSONReaderTest, ReplaceInvalidCharacters) { + // U+D800 is a lone surrogate. + const std::string invalid = "\"\xED\xA0\x80\""; + Optional<Value> value = + JSONReader::Read(invalid, JSON_REPLACE_INVALID_CHARACTERS); + ASSERT_TRUE(value); + ASSERT_TRUE(value->is_string()); + // Expect three U+FFFD (one for each UTF-8 byte in the invalid code point). + EXPECT_EQ("\xEF\xBF\xBD\xEF\xBF\xBD\xEF\xBF\xBD", value->GetString()); +} + +TEST(JSONReaderTest, ReplaceInvalidUTF16EscapeSequence) { + // U+D800 is a lone surrogate. 
+ const std::string invalid = "\"_\\uD800_\""; + Optional<Value> value = + JSONReader::Read(invalid, JSON_REPLACE_INVALID_CHARACTERS); + ASSERT_TRUE(value); + ASSERT_TRUE(value->is_string()); + EXPECT_EQ("_\xEF\xBF\xBD_", value->GetString()); +} + +TEST(JSONReaderTest, ParseNumberErrors) { + const struct { + const char* input; + bool parse_success; + double value; + } kCases[] = { + // clang-format off + {"1", true, 1}, + {"2.", false, 0}, + {"42", true, 42}, + {"6e", false, 0}, + {"43e2", true, 4300}, + {"43e-", false, 0}, + {"9e-3", true, 0.009}, + {"2e+", false, 0}, + {"2e+2", true, 200}, + // clang-format on + }; + + for (unsigned int i = 0; i < base::size(kCases); ++i) { + auto test_case = kCases[i]; + SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input)); + + std::unique_ptr<char[]> input_owner; + StringPiece input = + MakeNotNullTerminatedInput(test_case.input, &input_owner); + + Optional<Value> result = JSONReader::Read(input); + EXPECT_EQ(test_case.parse_success, result.has_value()); + + if (!result) + continue; + + ASSERT_TRUE(result->is_double() || result->is_int()); + EXPECT_EQ(test_case.value, result->GetDouble()); + } +} + +TEST(JSONReaderTest, UnterminatedInputs) { + const char* const kCases[] = { + // clang-format off + "/", + "//", + "/*", + "\"xxxxxx", + "\"", + "{ ", + "[\t", + "tru", + "fals", + "nul", + "\"\\x", + "\"\\x2", + "\"\\u123", + "\"\\uD803\\u", + "\"\\", + "\"\\/", + // clang-format on + }; + + for (unsigned int i = 0; i < base::size(kCases); ++i) { + auto* test_case = kCases[i]; + SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case)); + + std::unique_ptr<char[]> input_owner; + StringPiece input = MakeNotNullTerminatedInput(test_case, &input_owner); + + EXPECT_FALSE(JSONReader::Read(input)); + } +} + +TEST(JSONReaderTest, LineColumnCounting) { + const struct { + const char* input; + int error_line; + int error_column; + } kCases[] = { + // For all but the "q_is_not_etc" case, the error (indicated by ^ in the + // comments) is seeing a digit when expecting ',' or ']'. + { + // Line and column counts are 1-based, not 0-based. + "q_is_not_the_start_of_any_valid_JSON_token", + 1, + 1, + }, + { + "[2,4,6 8", + // -----^ + 1, + 8, + }, + { + "[2,4,6\t8", + // ------^ + 1, + 8, + }, + { + "[2,4,6\n8", + // ------^ + 2, + 1, + }, + { + "[\n0,\n1,\n2,\n3,4,5,6 7,\n8,\n9\n]", + // ---------------------^ + 5, + 9, + }, + { + // Same as the previous example, but with "\r\n"s instead of "\n"s. + "[\r\n0,\r\n1,\r\n2,\r\n3,4,5,6 7,\r\n8,\r\n9\r\n]", + // -----------------------------^ + 5, + 9, + }, + // The JSON spec forbids unescaped ASCII control characters (including + // line breaks) within a string, but our implementation is more lenient. 
+ { + "[\"3\n1\" 4", + // --------^ + 2, + 4, + }, + { + "[\"3\r\n1\" 4", + // ----------^ + 2, + 4, + }, + }; + + for (unsigned int i = 0; i < base::size(kCases); ++i) { + auto test_case = kCases[i]; + SCOPED_TRACE(StringPrintf("case %u: \"%s\"", i, test_case.input)); + + JSONReader::ValueWithError root = JSONReader::ReadAndReturnValueWithError( + test_case.input, JSON_PARSE_RFC); + EXPECT_FALSE(root.value); + EXPECT_EQ(test_case.error_line, root.error_line); + EXPECT_EQ(test_case.error_column, root.error_column); + } +} + } // namespace base diff --git a/chromium/base/json/json_string_value_serializer.cc b/chromium/base/json/json_string_value_serializer.cc index f97909032c7..0ec48ca57fe 100644 --- a/chromium/base/json/json_string_value_serializer.cc +++ b/chromium/base/json/json_string_value_serializer.cc @@ -6,7 +6,6 @@ #include "base/json/json_reader.h" #include "base/json/json_writer.h" -#include "base/logging.h" using base::Value; diff --git a/chromium/base/json/json_value_serializer_unittest.cc b/chromium/base/json/json_value_serializer_unittest.cc index 443c7daa0dd..5722cb0f0dd 100644 --- a/chromium/base/json/json_value_serializer_unittest.cc +++ b/chromium/base/json/json_value_serializer_unittest.cc @@ -149,8 +149,7 @@ TEST(JSONValueDeserializerTest, ReadProperJSONFromFile) { ASSERT_TRUE(tempdir.CreateUniqueTempDir()); // Write it down in the file. FilePath temp_file(tempdir.GetPath().AppendASCII("test.json")); - ASSERT_EQ(static_cast<int>(strlen(kProperJSON)), - WriteFile(temp_file, kProperJSON, strlen(kProperJSON))); + ASSERT_TRUE(WriteFile(temp_file, kProperJSON)); // Try to deserialize it through the serializer. JSONFileValueDeserializer file_deserializer(temp_file); @@ -173,9 +172,7 @@ TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) { ASSERT_TRUE(tempdir.CreateUniqueTempDir()); // Write it down in the file. FilePath temp_file(tempdir.GetPath().AppendASCII("test.json")); - ASSERT_EQ(static_cast<int>(strlen(kProperJSONWithCommas)), - WriteFile(temp_file, kProperJSONWithCommas, - strlen(kProperJSONWithCommas))); + ASSERT_TRUE(WriteFile(temp_file, kProperJSONWithCommas)); // Try to deserialize it through the serializer. JSONFileValueDeserializer file_deserializer(temp_file); diff --git a/chromium/base/json/string_escape.h b/chromium/base/json/string_escape.h index f75f475afc6..4f62b6d2934 100644 --- a/chromium/base/json/string_escape.h +++ b/chromium/base/json/string_escape.h @@ -52,7 +52,7 @@ BASE_EXPORT std::string GetQuotedJSONString(StringPiece16 str); // interpret it as UTF-16 and convert it to UTF-8. // // The output of this function takes the *appearance* of JSON but is not in -// fact valid according to RFC 4627. +// fact valid according to RFC 8259. BASE_EXPORT std::string EscapeBytesAsInvalidJSONString(StringPiece str, bool put_in_quotes); diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc index 608cc1122b9..fdd86299aa4 100644 --- a/chromium/base/logging.cc +++ b/chromium/base/logging.cc @@ -4,6 +4,13 @@ #include "base/logging.h" +// logging.h is a widely included header and its size has significant impact on +// build time. Try not to raise this limit unless absolutely necessary. 
See +// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md +#ifndef NACL_TC_REV +#pragma clang max_tokens_here 370000 +#endif // NACL_TC_REV + #include <limits.h> #include <stdint.h> @@ -439,7 +446,7 @@ bool BaseInitLoggingImpl(const LoggingSettings& settings) { const char* log_tag_data = log_tag.data(); config.tags = &log_tag_data; config.num_tags = 1; - fx_log_init_with_config(&config); + fx_log_reconfigure(&config); } #endif @@ -551,22 +558,6 @@ LogMessageHandlerFunction GetLogMessageHandler() { return log_message_handler; } -// Explicit instantiations for commonly used comparisons. -template std::string* MakeCheckOpString<int, int>( - const int&, const int&, const char* names); -template std::string* MakeCheckOpString<unsigned long, unsigned long>( - const unsigned long&, const unsigned long&, const char* names); -template std::string* MakeCheckOpString<unsigned long, unsigned int>( - const unsigned long&, const unsigned int&, const char* names); -template std::string* MakeCheckOpString<unsigned int, unsigned long>( - const unsigned int&, const unsigned long&, const char* names); -template std::string* MakeCheckOpString<std::string, std::string>( - const std::string&, const std::string&, const char* name); - -void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p) { - (*os) << "nullptr"; -} - #if !defined(NDEBUG) // Displays a message box to the user with the error message in it. // Used for fatal messages, where we close the app simultaneously. @@ -604,21 +595,6 @@ LogMessage::LogMessage(const char* file, int line, const char* condition) stream_ << "Check failed: " << condition << ". "; } -LogMessage::LogMessage(const char* file, int line, std::string* result) - : severity_(LOG_FATAL), file_(file), line_(line) { - Init(file, line); - stream_ << "Check failed: " << *result; - delete result; -} - -LogMessage::LogMessage(const char* file, int line, LogSeverity severity, - std::string* result) - : severity_(severity), file_(file), line_(line) { - Init(file, line); - stream_ << "Check failed: " << *result; - delete result; -} - LogMessage::~LogMessage() { size_t stack_start = stream_.tellp(); #if !defined(OFFICIAL_BUILD) && !defined(OS_NACL) && !defined(__UCLIBC__) && \ @@ -1066,9 +1042,7 @@ Win32ErrorLogMessage::Win32ErrorLogMessage(const char* file, int line, LogSeverity severity, SystemErrorCode err) - : err_(err), - log_message_(file, line, severity) { -} + : LogMessage(file, line, severity), err_(err) {} Win32ErrorLogMessage::~Win32ErrorLogMessage() { stream() << ": " << SystemErrorCodeToString(err_); @@ -1082,9 +1056,7 @@ ErrnoLogMessage::ErrnoLogMessage(const char* file, int line, LogSeverity severity, SystemErrorCode err) - : err_(err), - log_message_(file, line, severity) { -} + : LogMessage(file, line, severity), err_(err) {} ErrnoLogMessage::~ErrnoLogMessage() { stream() << ": " << SystemErrorCodeToString(err_); @@ -1167,11 +1139,6 @@ std::wstring GetLogFileFullPath() { } #endif -BASE_EXPORT void LogErrorNotReached(const char* file, int line) { - LogMessage(file, line, LOG_ERROR).stream() - << "NOTREACHED() hit."; -} - } // namespace logging std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) { diff --git a/chromium/base/logging.h b/chromium/base/logging.h index cb24b948331..4bc8b00ef88 100644 --- a/chromium/base/logging.h +++ b/chromium/base/logging.h @@ -9,22 +9,18 @@ #include <cassert> #include <cstdint> -#include <cstring> #include <sstream> #include <string> -#include <type_traits> -#include <utility> #include 
"base/base_export.h" #include "base/callback_forward.h" +#include "base/check.h" +#include "base/check_op.h" #include "base/compiler_specific.h" -#include "base/immediate_crash.h" -#include "base/logging_buildflags.h" #include "base/macros.h" +#include "base/notreached.h" #include "base/scoped_clear_last_error.h" #include "base/strings/string_piece_forward.h" -#include "base/template_util.h" -#include "build/build_config.h" #if defined(OS_CHROMEOS) #include <cstdio> @@ -338,37 +334,6 @@ typedef bool (*LogMessageHandlerFunction)(int severity, BASE_EXPORT void SetLogMessageHandler(LogMessageHandlerFunction handler); BASE_EXPORT LogMessageHandlerFunction GetLogMessageHandler(); -// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints -// to Clang which control what code paths are statically analyzed, -// and is meant to be used in conjunction with assert & assert-like functions. -// The expression is passed straight through if analysis isn't enabled. -// -// ANALYZER_SKIP_THIS_PATH() suppresses static analysis for the current -// codepath and any other branching codepaths that might follow. -#if defined(__clang_analyzer__) - -inline constexpr bool AnalyzerNoReturn() __attribute__((analyzer_noreturn)) { - return false; -} - -inline constexpr bool AnalyzerAssumeTrue(bool arg) { - // AnalyzerNoReturn() is invoked and analysis is terminated if |arg| is - // false. - return arg || AnalyzerNoReturn(); -} - -#define ANALYZER_ASSUME_TRUE(arg) logging::AnalyzerAssumeTrue(!!(arg)) -#define ANALYZER_SKIP_THIS_PATH() \ - static_cast<void>(::logging::AnalyzerNoReturn()) -#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var); - -#else // !defined(__clang_analyzer__) - -#define ANALYZER_ASSUME_TRUE(arg) (arg) -#define ANALYZER_SKIP_THIS_PATH() -#define ANALYZER_ALLOW_UNUSED(var) static_cast<void>(var); - -#endif // defined(__clang_analyzer__) typedef int LogSeverity; const LogSeverity LOG_VERBOSE = -1; // This is level 1 verbosity @@ -525,203 +490,6 @@ BASE_EXPORT extern std::ostream* g_swallow_stream; true ? (void)0 \ : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream) -// Captures the result of a CHECK_EQ (for example) and facilitates testing as a -// boolean. -class CheckOpResult { - public: - // |message| must be non-null if and only if the check failed. - constexpr CheckOpResult(std::string* message) : message_(message) {} - // Returns true if the check succeeded. - constexpr operator bool() const { return !message_; } - // Returns the message. - std::string* message() { return message_; } - - private: - std::string* message_; -}; - -// CHECK dies with a fatal error if condition is not true. It is *not* -// controlled by NDEBUG, so the check will be executed regardless of -// compilation mode. -// -// We make sure CHECK et al. always evaluates their arguments, as -// doing CHECK(FunctionWithSideEffect()) is a common idiom. - -#if defined(OFFICIAL_BUILD) && defined(NDEBUG) - -// Make all CHECK functions discard their log strings to reduce code bloat, and -// improve performance, for official release builds. -// -// This is not calling BreakDebugger since this is called frequently, and -// calling an out-of-line function instead of a noreturn inline macro prevents -// compiler optimizations. -#define CHECK(condition) \ - UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_STREAM_PARAMETERS - -// PCHECK includes the system error code, which is useful for determining -// why the condition failed. 
In official builds, preserve only the error code -// message so that it is available in crash reports. The stringified -// condition and any additional stream parameters are dropped. -#define PCHECK(condition) \ - LAZY_STREAM(PLOG_STREAM(FATAL), UNLIKELY(!(condition))); \ - EAT_STREAM_PARAMETERS - -#define CHECK_OP(name, op, val1, val2) CHECK((val1) op (val2)) - -#else // !(OFFICIAL_BUILD && NDEBUG) - -// Do as much work as possible out of line to reduce inline code size. -#define CHECK(condition) \ - LAZY_STREAM(::logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \ - !ANALYZER_ASSUME_TRUE(condition)) - -#define PCHECK(condition) \ - LAZY_STREAM(PLOG_STREAM(FATAL), !ANALYZER_ASSUME_TRUE(condition)) \ - << "Check failed: " #condition ". " - -// Helper macro for binary operators. -// Don't use this macro directly in your code, use CHECK_EQ et al below. -// The 'switch' is used to prevent the 'else' from being ambiguous when the -// macro is used in an 'if' clause such as: -// if (a == 1) -// CHECK_EQ(2, a); -#define CHECK_OP(name, op, val1, val2) \ - switch (0) case 0: default: \ - if (::logging::CheckOpResult true_if_passed = \ - ::logging::Check##name##Impl((val1), (val2), \ - #val1 " " #op " " #val2)) \ - ; \ - else \ - ::logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream() - -#endif // !(OFFICIAL_BUILD && NDEBUG) - -// This formats a value for a failing CHECK_XX statement. Ordinarily, -// it uses the definition for operator<<, with a few special cases below. -template <typename T> -inline typename std::enable_if< - base::internal::SupportsOstreamOperator<const T&>::value && - !std::is_function<typename std::remove_pointer<T>::type>::value, - void>::type -MakeCheckOpValueString(std::ostream* os, const T& v) { - (*os) << v; -} - -// Overload for types that no operator<< but do have .ToString() defined. -template <typename T> -inline typename std::enable_if< - !base::internal::SupportsOstreamOperator<const T&>::value && - base::internal::SupportsToString<const T&>::value, - void>::type -MakeCheckOpValueString(std::ostream* os, const T& v) { - (*os) << v.ToString(); -} - -// Provide an overload for functions and function pointers. Function pointers -// don't implicitly convert to void* but do implicitly convert to bool, so -// without this function pointers are always printed as 1 or 0. (MSVC isn't -// standards-conforming here and converts function pointers to regular -// pointers, so this is a no-op for MSVC.) -template <typename T> -inline typename std::enable_if< - std::is_function<typename std::remove_pointer<T>::type>::value, - void>::type -MakeCheckOpValueString(std::ostream* os, const T& v) { - (*os) << reinterpret_cast<const void*>(v); -} - -// We need overloads for enums that don't support operator<<. -// (i.e. scoped enums where no operator<< overload was declared). -template <typename T> -inline typename std::enable_if< - !base::internal::SupportsOstreamOperator<const T&>::value && - std::is_enum<T>::value, - void>::type -MakeCheckOpValueString(std::ostream* os, const T& v) { - (*os) << static_cast<typename std::underlying_type<T>::type>(v); -} - -// We need an explicit overload for std::nullptr_t. -BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p); - -// Build the error message string. This is separate from the "Impl" -// function template because it is not performance critical and so can -// be out of line, while the "Impl" code should be inline. Caller -// takes ownership of the returned string. 
-template<class t1, class t2> -std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) { - std::ostringstream ss; - ss << names << " ("; - MakeCheckOpValueString(&ss, v1); - ss << " vs. "; - MakeCheckOpValueString(&ss, v2); - ss << ")"; - std::string* msg = new std::string(ss.str()); - return msg; -} - -// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated -// in logging.cc. -extern template BASE_EXPORT std::string* MakeCheckOpString<int, int>( - const int&, const int&, const char* names); -extern template BASE_EXPORT -std::string* MakeCheckOpString<unsigned long, unsigned long>( - const unsigned long&, const unsigned long&, const char* names); -extern template BASE_EXPORT -std::string* MakeCheckOpString<unsigned long, unsigned int>( - const unsigned long&, const unsigned int&, const char* names); -extern template BASE_EXPORT -std::string* MakeCheckOpString<unsigned int, unsigned long>( - const unsigned int&, const unsigned long&, const char* names); -extern template BASE_EXPORT -std::string* MakeCheckOpString<std::string, std::string>( - const std::string&, const std::string&, const char* name); - -// Helper functions for CHECK_OP macro. -// The (int, int) specialization works around the issue that the compiler -// will not instantiate the template version of the function on values of -// unnamed enum type - see comment below. -// -// The checked condition is wrapped with ANALYZER_ASSUME_TRUE, which under -// static analysis builds, blocks analysis of the current path if the -// condition is false. -#define DEFINE_CHECK_OP_IMPL(name, op) \ - template <class t1, class t2> \ - constexpr std::string* Check##name##Impl(const t1& v1, const t2& v2, \ - const char* names) { \ - if (ANALYZER_ASSUME_TRUE(v1 op v2)) \ - return nullptr; \ - else \ - return ::logging::MakeCheckOpString(v1, v2, names); \ - } \ - constexpr std::string* Check##name##Impl(int v1, int v2, \ - const char* names) { \ - if (ANALYZER_ASSUME_TRUE(v1 op v2)) \ - return nullptr; \ - else \ - return ::logging::MakeCheckOpString(v1, v2, names); \ - } -DEFINE_CHECK_OP_IMPL(EQ, ==) -DEFINE_CHECK_OP_IMPL(NE, !=) -DEFINE_CHECK_OP_IMPL(LE, <=) -DEFINE_CHECK_OP_IMPL(LT, < ) -DEFINE_CHECK_OP_IMPL(GE, >=) -DEFINE_CHECK_OP_IMPL(GT, > ) -#undef DEFINE_CHECK_OP_IMPL - -#define CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2) -#define CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2) -#define CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2) -#define CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2) -#define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2) -#define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2) - -#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) -#define DCHECK_IS_ON() false -#else -#define DCHECK_IS_ON() true -#endif - // Definitions for DLOG et al. #if DCHECK_IS_ON() @@ -776,106 +544,6 @@ const LogSeverity LOG_DCHECK = LOG_FATAL; #endif // DCHECK_IS_ON() -// DCHECK et al. make sure to reference |condition| regardless of -// whether DCHECKs are enabled; this is so that we don't get unused -// variable warnings if the only use of a variable is in a DCHECK. -// This behavior is different from DLOG_IF et al. -// -// Note that the definition of the DCHECK macros depends on whether or not -// DCHECK_IS_ON() is true. When DCHECK_IS_ON() is false, the macros use -// EAT_STREAM_PARAMETERS to avoid expressions that would create temporaries. 
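The practical consequence of the two comments above: CHECK() always evaluates its condition, while a compiled-out DCHECK() only references it, so side effects inside a DCHECK() silently disappear in builds where DCHECK_IS_ON() is false. A minimal illustrative sketch (not part of this change; ConsumeToken() is a hypothetical side-effecting helper):

bool ConsumeToken();  // hypothetical: advances a cursor, returns success

void Example() {
  CHECK(ConsumeToken());   // Evaluated in every build mode; safe idiom.
  DCHECK(ConsumeToken());  // Referenced but never evaluated when
                           // DCHECK_IS_ON() is false, so the cursor does
                           // not advance in release builds.
}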
- -#if DCHECK_IS_ON() - -#define DCHECK(condition) \ - LAZY_STREAM(LOG_STREAM(DCHECK), !ANALYZER_ASSUME_TRUE(condition)) \ - << "Check failed: " #condition ". " -#define DPCHECK(condition) \ - LAZY_STREAM(PLOG_STREAM(DCHECK), !ANALYZER_ASSUME_TRUE(condition)) \ - << "Check failed: " #condition ". " - -#else // DCHECK_IS_ON() - -#define DCHECK(condition) EAT_STREAM_PARAMETERS << !(condition) -#define DPCHECK(condition) EAT_STREAM_PARAMETERS << !(condition) - -#endif // DCHECK_IS_ON() - -// Helper macro for binary operators. -// Don't use this macro directly in your code, use DCHECK_EQ et al below. -// The 'switch' is used to prevent the 'else' from being ambiguous when the -// macro is used in an 'if' clause such as: -// if (a == 1) -// DCHECK_EQ(2, a); -#if DCHECK_IS_ON() - -#define DCHECK_OP(name, op, val1, val2) \ - switch (0) case 0: default: \ - if (::logging::CheckOpResult true_if_passed = \ - ::logging::Check##name##Impl((val1), (val2), \ - #val1 " " #op " " #val2)) \ - ; \ - else \ - ::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \ - true_if_passed.message()).stream() - -#else // DCHECK_IS_ON() - -// When DCHECKs aren't enabled, DCHECK_OP still needs to reference operator<< -// overloads for |val1| and |val2| to avoid potential compiler warnings about -// unused functions. For the same reason, it also compares |val1| and |val2| -// using |op|. -// -// Note that the contract of DCHECK_EQ, etc is that arguments are only evaluated -// once. Even though |val1| and |val2| appear twice in this version of the macro -// expansion, this is OK, since the expression is never actually evaluated. -#define DCHECK_OP(name, op, val1, val2) \ - EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString( \ - ::logging::g_swallow_stream, val1), \ - ::logging::MakeCheckOpValueString( \ - ::logging::g_swallow_stream, val2), \ - (val1)op(val2)) - -#endif // DCHECK_IS_ON() - -// Equality/Inequality checks - compare two values, and log a -// LOG_DCHECK message including the two values when the result is not -// as expected. The values must have operator<<(ostream, ...) -// defined. -// -// You may append to the error message like so: -// DCHECK_NE(1, 2) << "The world must be ending!"; -// -// We are very careful to ensure that each argument is evaluated exactly -// once, and that anything which is legal to pass as a function argument is -// legal here. In particular, the arguments may be temporary expressions -// which will end up being destroyed at the end of the apparent statement, -// for example: -// DCHECK_EQ(string("abc")[1], 'b'); -// -// WARNING: These don't compile correctly if one of the arguments is a pointer -// and the other is NULL. In new code, prefer nullptr instead. To -// work around this for C++98, simply static_cast NULL to the type of the -// desired pointer. - -#define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2) -#define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2) -#define DCHECK_LE(val1, val2) DCHECK_OP(LE, <=, val1, val2) -#define DCHECK_LT(val1, val2) DCHECK_OP(LT, < , val1, val2) -#define DCHECK_GE(val1, val2) DCHECK_OP(GE, >=, val1, val2) -#define DCHECK_GT(val1, val2) DCHECK_OP(GT, > , val1, val2) - -#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED) -// Implement logging of NOTREACHED() as a dedicated function to get function -// call overhead down to a minimum. -void LogErrorNotReached(const char* file, int line); -#define NOTREACHED() \ - true ? 
::logging::LogErrorNotReached(__FILE__, __LINE__) \ - : EAT_STREAM_PARAMETERS -#else -#define NOTREACHED() DCHECK(false) -#endif - // Redefine the standard assert to use our nice log files #undef assert #define assert(x) DLOG_ASSERT(x) @@ -896,15 +564,7 @@ class BASE_EXPORT LogMessage { // Used for CHECK(). Implied severity = LOG_FATAL. LogMessage(const char* file, int line, const char* condition); - // Used for CHECK_EQ(), etc. Takes ownership of the given string. - // Implied severity = LOG_FATAL. - LogMessage(const char* file, int line, std::string* result); - - // Used for DCHECK_EQ(), etc. Takes ownership of the given string. - LogMessage(const char* file, int line, LogSeverity severity, - std::string* result); - - ~LogMessage(); + virtual ~LogMessage(); std::ostream& stream() { return stream_; } @@ -926,7 +586,7 @@ class BASE_EXPORT LogMessage { // This is useful since the LogMessage class uses a lot of Win32 calls // that will lose the value of GLE and the code that called the log function // will have lost the thread error value when the log call returns. - base::internal::ScopedClearLastError last_error_; + base::ScopedClearLastError last_error_; DISALLOW_COPY_AND_ASSIGN(LogMessage); }; @@ -955,7 +615,7 @@ BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code); #if defined(OS_WIN) // Appends a formatted system message of the GetLastError() type. -class BASE_EXPORT Win32ErrorLogMessage { +class BASE_EXPORT Win32ErrorLogMessage : public LogMessage { public: Win32ErrorLogMessage(const char* file, int line, @@ -963,19 +623,16 @@ class BASE_EXPORT Win32ErrorLogMessage { SystemErrorCode err); // Appends the error message before destructing the encapsulated class. - ~Win32ErrorLogMessage(); - - std::ostream& stream() { return log_message_.stream(); } + ~Win32ErrorLogMessage() override; private: SystemErrorCode err_; - LogMessage log_message_; DISALLOW_COPY_AND_ASSIGN(Win32ErrorLogMessage); }; #elif defined(OS_POSIX) || defined(OS_FUCHSIA) // Appends a formatted system message of the errno type -class BASE_EXPORT ErrnoLogMessage { +class BASE_EXPORT ErrnoLogMessage : public LogMessage { public: ErrnoLogMessage(const char* file, int line, @@ -983,13 +640,10 @@ class BASE_EXPORT ErrnoLogMessage { SystemErrorCode err); // Appends the error message before destructing the encapsulated class. - ~ErrnoLogMessage(); - - std::ostream& stream() { return log_message_.stream(); } + ~ErrnoLogMessage() override; private: SystemErrorCode err_; - LogMessage log_message_; DISALLOW_COPY_AND_ASSIGN(ErrnoLogMessage); }; @@ -1015,12 +669,6 @@ BASE_EXPORT void RawLog(int level, const char* message); #define RAW_LOG(level, message) \ ::logging::RawLog(::logging::LOG_##level, message) -#define RAW_CHECK(condition) \ - do { \ - if (!(condition)) \ - ::logging::RawLog(::logging::LOG_FATAL, \ - "Check failed: " #condition "\n"); \ - } while (0) #if defined(OS_WIN) // Returns true if logging to file is enabled. @@ -1053,25 +701,4 @@ inline std::ostream& operator<<(std::ostream& out, const std::wstring& wstr) { } } // namespace std -// The NOTIMPLEMENTED() macro annotates codepaths which have not been -// implemented yet. If output spam is a serious concern, -// NOTIMPLEMENTED_LOG_ONCE can be used. - -#if defined(COMPILER_GCC) -// On Linux, with GCC, we can use __PRETTY_FUNCTION__ to get the demangled name -// of the current function in the NOTIMPLEMENTED message. 
-#define NOTIMPLEMENTED_MSG "Not implemented reached in " << __PRETTY_FUNCTION__ -#else -#define NOTIMPLEMENTED_MSG "NOT IMPLEMENTED" -#endif - -#define NOTIMPLEMENTED() DLOG(ERROR) << NOTIMPLEMENTED_MSG -#define NOTIMPLEMENTED_LOG_ONCE() \ - do { \ - static bool logged_once = false; \ - DLOG_IF(ERROR, !logged_once) << NOTIMPLEMENTED_MSG; \ - logged_once = true; \ - } while (0); \ - EAT_STREAM_PARAMETERS - #endif // BASE_LOGGING_H_ diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc index 4874f8e10f6..90378f09db7 100644 --- a/chromium/base/logging_unittest.cc +++ b/chromium/base/logging_unittest.cc @@ -14,6 +14,7 @@ #include "base/run_loop.h" #include "base/sanitizer_buildflags.h" #include "base/strings/string_piece.h" +#include "base/test/bind_test_util.h" #include "base/test/scoped_feature_list.h" #include "base/test/task_environment.h" #include "build/build_config.h" @@ -63,18 +64,6 @@ namespace { using ::testing::Return; using ::testing::_; -// Needs to be global since log assert handlers can't maintain state. -int g_log_sink_call_count = 0; - -#if !defined(OFFICIAL_BUILD) || defined(DCHECK_ALWAYS_ON) || !defined(NDEBUG) -void LogSink(const char* file, - int line, - const base::StringPiece message, - const base::StringPiece stack_trace) { - ++g_log_sink_call_count; -} -#endif - // Class to make sure any manipulations we do to the min log level are // contained (i.e., do not affect other unit tests). class LogStateSaver { @@ -83,7 +72,6 @@ class LogStateSaver { ~LogStateSaver() { SetMinLogLevel(old_min_log_level_); - g_log_sink_call_count = 0; } private: @@ -375,33 +363,6 @@ TEST_F(LoggingTest, DuplicateLogFile) { } #endif // defined(OS_CHROMEOS) -// Official builds have CHECKs directly call BreakDebugger. -#if !defined(OFFICIAL_BUILD) - -// https://crbug.com/709067 tracks test flakiness on iOS. -#if defined(OS_IOS) -#define MAYBE_CheckStreamsAreLazy DISABLED_CheckStreamsAreLazy -#else -#define MAYBE_CheckStreamsAreLazy CheckStreamsAreLazy -#endif -TEST_F(LoggingTest, MAYBE_CheckStreamsAreLazy) { - MockLogSource mock_log_source, uncalled_mock_log_source; - EXPECT_CALL(mock_log_source, Log()).Times(8). 
- WillRepeatedly(Return("check message")); - EXPECT_CALL(uncalled_mock_log_source, Log()).Times(0); - - ScopedLogAssertHandler scoped_assert_handler(base::BindRepeating(LogSink)); - - CHECK(mock_log_source.Log()) << uncalled_mock_log_source.Log(); - PCHECK(!mock_log_source.Log()) << mock_log_source.Log(); - CHECK_EQ(mock_log_source.Log(), mock_log_source.Log()) - << uncalled_mock_log_source.Log(); - CHECK_NE(mock_log_source.Log(), mock_log_source.Log()) - << mock_log_source.Log(); -} - -#endif - #if defined(OFFICIAL_BUILD) && defined(OS_WIN) NOINLINE void CheckContainingFunc(int death_location) { CHECK(death_location != 1); @@ -688,160 +649,6 @@ TEST_F(LoggingTest, DebugLoggingReleaseBehavior) { DVLOG_IF(1, debug_only_variable) << "test"; } -TEST_F(LoggingTest, DcheckStreamsAreLazy) { - MockLogSource mock_log_source; - EXPECT_CALL(mock_log_source, Log()).Times(0); -#if DCHECK_IS_ON() - DCHECK(true) << mock_log_source.Log(); - DCHECK_EQ(0, 0) << mock_log_source.Log(); -#else - DCHECK(mock_log_source.Log()) << mock_log_source.Log(); - DPCHECK(mock_log_source.Log()) << mock_log_source.Log(); - DCHECK_EQ(0, 0) << mock_log_source.Log(); - DCHECK_EQ(mock_log_source.Log(), static_cast<const char*>(nullptr)) - << mock_log_source.Log(); -#endif -} - -void DcheckEmptyFunction1() { - // Provide a body so that Release builds do not cause the compiler to - // optimize DcheckEmptyFunction1 and DcheckEmptyFunction2 as a single - // function, which breaks the Dcheck tests below. - LOG(INFO) << "DcheckEmptyFunction1"; -} -void DcheckEmptyFunction2() {} - -#if defined(DCHECK_IS_CONFIGURABLE) -class ScopedDcheckSeverity { - public: - ScopedDcheckSeverity(LogSeverity new_severity) : old_severity_(LOG_DCHECK) { - LOG_DCHECK = new_severity; - } - - ~ScopedDcheckSeverity() { LOG_DCHECK = old_severity_; } - - private: - LogSeverity old_severity_; -}; -#endif // defined(DCHECK_IS_CONFIGURABLE) - -// https://crbug.com/709067 tracks test flakiness on iOS. -#if defined(OS_IOS) -#define MAYBE_Dcheck DISABLED_Dcheck -#else -#define MAYBE_Dcheck Dcheck -#endif -TEST_F(LoggingTest, MAYBE_Dcheck) { -#if defined(DCHECK_IS_CONFIGURABLE) - // DCHECKs are enabled, and LOG_DCHECK is mutable, but defaults to non-fatal. - // Set it to LOG_FATAL to get the expected behavior from the rest of this - // test. - ScopedDcheckSeverity dcheck_severity(LOG_FATAL); -#endif // defined(DCHECK_IS_CONFIGURABLE) - -#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON) - // Release build. - EXPECT_FALSE(DCHECK_IS_ON()); - EXPECT_FALSE(DLOG_IS_ON(DCHECK)); -#elif defined(NDEBUG) && defined(DCHECK_ALWAYS_ON) - // Release build with real DCHECKS. - ScopedLogAssertHandler scoped_assert_handler(base::BindRepeating(LogSink)); - EXPECT_TRUE(DCHECK_IS_ON()); - EXPECT_TRUE(DLOG_IS_ON(DCHECK)); -#else - // Debug build. - ScopedLogAssertHandler scoped_assert_handler(base::BindRepeating(LogSink)); - EXPECT_TRUE(DCHECK_IS_ON()); - EXPECT_TRUE(DLOG_IS_ON(DCHECK)); -#endif - - // DCHECKs are fatal iff they're compiled in DCHECK_IS_ON() and the DCHECK - // log level is set to fatal. - const bool dchecks_are_fatal = DCHECK_IS_ON() && LOG_DCHECK == LOG_FATAL; - EXPECT_EQ(0, g_log_sink_call_count); - DCHECK(false); - EXPECT_EQ(dchecks_are_fatal ? 1 : 0, g_log_sink_call_count); - DPCHECK(false); - EXPECT_EQ(dchecks_are_fatal ? 2 : 0, g_log_sink_call_count); - DCHECK_EQ(0, 1); - EXPECT_EQ(dchecks_are_fatal ? 
3 : 0, g_log_sink_call_count); - - // Test DCHECK on std::nullptr_t - g_log_sink_call_count = 0; - const void* p_null = nullptr; - const void* p_not_null = &p_null; - DCHECK_EQ(p_null, nullptr); - DCHECK_EQ(nullptr, p_null); - DCHECK_NE(p_not_null, nullptr); - DCHECK_NE(nullptr, p_not_null); - EXPECT_EQ(0, g_log_sink_call_count); - - // Test DCHECK on a scoped enum. - enum class Animal { DOG, CAT }; - DCHECK_EQ(Animal::DOG, Animal::DOG); - EXPECT_EQ(0, g_log_sink_call_count); - DCHECK_EQ(Animal::DOG, Animal::CAT); - EXPECT_EQ(dchecks_are_fatal ? 1 : 0, g_log_sink_call_count); - - // Test DCHECK on functions and function pointers. - g_log_sink_call_count = 0; - struct MemberFunctions { - void MemberFunction1() { - // See the comment in DcheckEmptyFunction1(). - LOG(INFO) << "Do not merge with MemberFunction2."; - } - void MemberFunction2() {} - }; - void (MemberFunctions::*mp1)() = &MemberFunctions::MemberFunction1; - void (MemberFunctions::*mp2)() = &MemberFunctions::MemberFunction2; - void (*fp1)() = DcheckEmptyFunction1; - void (*fp2)() = DcheckEmptyFunction2; - void (*fp3)() = DcheckEmptyFunction1; - DCHECK_EQ(fp1, fp3); - EXPECT_EQ(0, g_log_sink_call_count); - DCHECK_EQ(mp1, &MemberFunctions::MemberFunction1); - EXPECT_EQ(0, g_log_sink_call_count); - DCHECK_EQ(mp2, &MemberFunctions::MemberFunction2); - EXPECT_EQ(0, g_log_sink_call_count); - DCHECK_EQ(fp1, fp2); - EXPECT_EQ(dchecks_are_fatal ? 1 : 0, g_log_sink_call_count); - DCHECK_EQ(mp2, &MemberFunctions::MemberFunction1); - EXPECT_EQ(dchecks_are_fatal ? 2 : 0, g_log_sink_call_count); -} - -TEST_F(LoggingTest, DcheckReleaseBehavior) { - int some_variable = 1; - // These should still reference |some_variable| so we don't get - // unused variable warnings. - DCHECK(some_variable) << "test"; - DPCHECK(some_variable) << "test"; - DCHECK_EQ(some_variable, 1) << "test"; -} - -TEST_F(LoggingTest, DCheckEqStatements) { - bool reached = false; - if (false) - DCHECK_EQ(false, true); // Unreached. - else - DCHECK_EQ(true, reached = true); // Reached, passed. - ASSERT_EQ(DCHECK_IS_ON() ? true : false, reached); - - if (false) - DCHECK_EQ(false, true); // Unreached. -} - -TEST_F(LoggingTest, CheckEqStatements) { - bool reached = false; - if (false) - CHECK_EQ(false, true); // Unreached. - else - CHECK_EQ(true, reached = true); // Reached, passed. - ASSERT_TRUE(reached); - - if (false) - CHECK_EQ(false, true); // Unreached. -} - TEST_F(LoggingTest, NestedLogAssertHandlers) { ::testing::InSequence dummy; ::testing::StrictMock<MockLogAssertHandler> handler_a, handler_b; @@ -898,69 +705,16 @@ namespace nested_test { } } // namespace nested_test -#if defined(DCHECK_IS_CONFIGURABLE) -TEST_F(LoggingTest, ConfigurableDCheck) { - // Verify that DCHECKs default to non-fatal in configurable-DCHECK builds. - // Note that we require only that DCHECK is non-fatal by default, rather - // than requiring that it be exactly INFO, ERROR, etc level. - EXPECT_LT(LOG_DCHECK, LOG_FATAL); - DCHECK(false); - - // Verify that DCHECK* aren't hard-wired to crash on failure. - LOG_DCHECK = LOG_INFO; - DCHECK(false); - DCHECK_EQ(1, 2); - - // Verify that DCHECK does crash if LOG_DCHECK is set to LOG_FATAL. 
- LOG_DCHECK = LOG_FATAL; - - ::testing::StrictMock<MockLogAssertHandler> handler; - EXPECT_CALL(handler, HandleLogAssert(_, _, _, _)).Times(2); - { - logging::ScopedLogAssertHandler scoped_handler_b(base::BindRepeating( - &MockLogAssertHandler::HandleLogAssert, base::Unretained(&handler))); - DCHECK(false); - DCHECK_EQ(1, 2); - } -} - -TEST_F(LoggingTest, ConfigurableDCheckFeature) { - // Initialize FeatureList with and without DcheckIsFatal, and verify the - // value of LOG_DCHECK. Note that we don't require that DCHECK take a - // specific value when the feature is off, only that it is non-fatal. - - { - base::test::ScopedFeatureList feature_list; - feature_list.InitFromCommandLine("DcheckIsFatal", ""); - EXPECT_EQ(LOG_DCHECK, LOG_FATAL); - } - - { - base::test::ScopedFeatureList feature_list; - feature_list.InitFromCommandLine("", "DcheckIsFatal"); - EXPECT_LT(LOG_DCHECK, LOG_FATAL); - } - - // The default case is last, so we leave LOG_DCHECK in the default state. - { - base::test::ScopedFeatureList feature_list; - feature_list.InitFromCommandLine("", ""); - EXPECT_LT(LOG_DCHECK, LOG_FATAL); - } -} -#endif // defined(DCHECK_IS_CONFIGURABLE) - #if defined(OS_FUCHSIA) -class TestLogListener : public fuchsia::logger::testing::LogListener_TestBase { +class TestLogListenerSafe + : public fuchsia::logger::testing::LogListenerSafe_TestBase { public: - TestLogListener() = default; - ~TestLogListener() override = default; + TestLogListenerSafe() = default; + ~TestLogListenerSafe() override = default; - void RunUntilDone() { - base::RunLoop loop; - dump_logs_done_quit_closure_ = loop.QuitClosure(); - loop.Run(); + void set_on_dump_logs_done(base::OnceClosure on_dump_logs_done) { + on_dump_logs_done_ = std::move(on_dump_logs_done); } bool DidReceiveString(base::StringPiece message, @@ -975,24 +729,26 @@ class TestLogListener : public fuchsia::logger::testing::LogListener_TestBase { } // LogListener implementation. - void LogMany(std::vector<fuchsia::logger::LogMessage> messages) override { + void LogMany(std::vector<fuchsia::logger::LogMessage> messages, + LogManyCallback callback) override { log_messages_.insert(log_messages_.end(), std::make_move_iterator(messages.begin()), std::make_move_iterator(messages.end())); + callback(); } - void Done() override { std::move(dump_logs_done_quit_closure_).Run(); } + void Done() override { std::move(on_dump_logs_done_).Run(); } void NotImplemented_(const std::string& name) override { - NOTIMPLEMENTED() << name; + ADD_FAILURE() << "NotImplemented_: " << name; } private: - fuchsia::logger::LogListenerPtr log_listener_; + fuchsia::logger::LogListenerSafePtr log_listener_; std::vector<fuchsia::logger::LogMessage> log_messages_; - base::OnceClosure dump_logs_done_quit_closure_; + base::OnceClosure on_dump_logs_done_; - DISALLOW_COPY_AND_ASSIGN(TestLogListener); + DISALLOW_COPY_AND_ASSIGN(TestLogListenerSafe); }; // Verifies that calling the log macro goes to the Fuchsia system logs. @@ -1000,11 +756,23 @@ TEST_F(LoggingTest, FuchsiaSystemLogging) { const char kLogMessage[] = "system log!"; LOG(ERROR) << kLogMessage; - TestLogListener listener; - fidl::Binding<fuchsia::logger::LogListener> binding(&listener); + TestLogListenerSafe listener; + fidl::Binding<fuchsia::logger::LogListenerSafe> binding(&listener); fuchsia::logger::LogMessage logged_message; - do { + + base::RunLoop wait_for_message_loop; + + // |dump_logs| checks whether the expected log line has been received yet, + // and invokes DumpLogs() if not. 
It passes itself as the completion callback, + // so that when the call completes it can check again for the expected message + // and re-invoke DumpLogs(), or quit the loop, as appropriate. + base::RepeatingClosure dump_logs = base::BindLambdaForTesting([&]() { + if (listener.DidReceiveString(kLogMessage, &logged_message)) { + wait_for_message_loop.Quit(); + return; + } + std::unique_ptr<fuchsia::logger::LogFilterOptions> options = std::make_unique<fuchsia::logger::LogFilterOptions>(); options->tags = {"base_unittests__exec"}; @@ -1012,9 +780,15 @@ TEST_F(LoggingTest, FuchsiaSystemLogging) { base::fuchsia::ComponentContextForCurrentProcess() ->svc() ->Connect<fuchsia::logger::Log>(); - logger->DumpLogs(binding.NewBinding(), std::move(options)); - listener.RunUntilDone(); - } while (!listener.DidReceiveString(kLogMessage, &logged_message)); + listener.set_on_dump_logs_done(dump_logs); + logger->DumpLogsSafe(binding.NewBinding(), std::move(options)); + }); + + // Start the first DumpLogs() call. + dump_logs.Run(); + + // Run until kLogMessage is received. + wait_for_message_loop.Run(); EXPECT_EQ(logged_message.severity, static_cast<int32_t>(fuchsia::logger::LogLevelFilter::ERROR)); @@ -1125,46 +899,6 @@ TEST_F(LoggingTest, LogMessageMarkersOnStack) { } #endif // !defined(ADDRESS_SANITIZER) -const char* kToStringResult = "to_string"; -const char* kOstreamResult = "ostream"; - -struct StructWithOstream {}; - -std::ostream& operator<<(std::ostream& out, const StructWithOstream&) { - return out << kOstreamResult; -} - -TEST(MakeCheckOpValueStringTest, HasOnlyOstream) { - std::ostringstream oss; - logging::MakeCheckOpValueString(&oss, StructWithOstream()); - EXPECT_EQ(kOstreamResult, oss.str()); -} - -struct StructWithToString { - std::string ToString() const { return kToStringResult; } -}; - -TEST(MakeCheckOpValueStringTest, HasOnlyToString) { - std::ostringstream oss; - logging::MakeCheckOpValueString(&oss, StructWithToString()); - EXPECT_EQ(kToStringResult, oss.str()); -} - -struct StructWithToStringAndOstream { - std::string ToString() const { return kToStringResult; } -}; - -std::ostream& operator<<(std::ostream& out, - const StructWithToStringAndOstream&) { - return out << kOstreamResult; -} - -TEST(MakeCheckOpValueStringTest, HasOstreamAndToString) { - std::ostringstream oss; - logging::MakeCheckOpValueString(&oss, StructWithToStringAndOstream()); - EXPECT_EQ(kOstreamResult, oss.str()); -} - } // namespace } // namespace logging diff --git a/chromium/base/mac/bundle_locations.mm b/chromium/base/mac/bundle_locations.mm index 54021b85ee0..3a6ae7ccbc2 100644 --- a/chromium/base/mac/bundle_locations.mm +++ b/chromium/base/mac/bundle_locations.mm @@ -4,7 +4,7 @@ #include "base/mac/bundle_locations.h" -#include "base/logging.h" +#include "base/check.h" #include "base/mac/foundation_util.h" #include "base/strings/sys_string_conversions.h" diff --git a/chromium/base/mac/foundation_util.h b/chromium/base/mac/foundation_util.h index a10b47b05bb..8d2f33a3a9f 100644 --- a/chromium/base/mac/foundation_util.h +++ b/chromium/base/mac/foundation_util.h @@ -36,10 +36,10 @@ class UIFont; // Adapted from NSPathUtilities.h and NSObjCRuntime.h. 
#if __LP64__ || NS_BUILD_32_LIKE_64 -typedef enum NSSearchPathDirectory : unsigned long NSSearchPathDirectory; +enum NSSearchPathDirectory : unsigned long; typedef unsigned long NSSearchPathDomainMask; #else -typedef enum NSSearchPathDirectory : unsigned int NSSearchPathDirectory; +enum NSSearchPathDirectory : unsigned int; typedef unsigned int NSSearchPathDomainMask; #endif diff --git a/chromium/base/mac/foundation_util.mm b/chromium/base/mac/foundation_util.mm index 2a83d4d8158..f37884f6f70 100644 --- a/chromium/base/mac/foundation_util.mm +++ b/chromium/base/mac/foundation_util.mm @@ -27,6 +27,11 @@ CFTypeID SecKeyGetTypeID(); #if !defined(OS_IOS) CFTypeID SecACLGetTypeID(); CFTypeID SecTrustedApplicationGetTypeID(); +// The NSFont/CTFont toll-free bridging is broken before 10.15. +// http://www.openradar.me/15341349 rdar://15341349 +// +// TODO(https://crbug.com/1076527): This is fixed in 10.15. When 10.15 is the +// minimum OS for Chromium, remove this SPI declaration. Boolean _CFIsObjC(CFTypeID typeID, CFTypeRef obj); #endif } // extern "C" @@ -308,9 +313,13 @@ CF_TO_NS_CAST_DEFN(CFURL, NSURL) #if defined(OS_IOS) CF_TO_NS_CAST_DEFN(CTFont, UIFont) #else -// The NSFont/CTFont toll-free bridging is broken when it comes to type -// checking, so do some special-casing. +// The NSFont/CTFont toll-free bridging is broken before 10.15. // http://www.openradar.me/15341349 rdar://15341349 +// +// TODO(https://crbug.com/1076527): This is fixed in 10.15. When 10.15 is the +// minimum OS for Chromium, remove this specialization and replace it with just: +// +// CF_TO_NS_CAST_DEFN(CTFont, NSFont) NSFont* CFToNSCast(CTFontRef cf_val) { NSFont* ns_val = const_cast<NSFont*>(reinterpret_cast<const NSFont*>(cf_val)); @@ -373,9 +382,12 @@ CF_CAST_DEFN(CTRun) #if defined(OS_IOS) CF_CAST_DEFN(CTFont) #else -// The NSFont/CTFont toll-free bridging is broken when it comes to type -// checking, so do some special-casing. +// The NSFont/CTFont toll-free bridging is broken before 10.15. // http://www.openradar.me/15341349 rdar://15341349 +// +// TODO(https://crbug.com/1076527): This is fixed in 10.15. When 10.15 is the +// minimum OS for Chromium, remove this specialization and the #if IOS above, +// and rely just on the one CF_CAST_DEFN(CTFont). 
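For readers unfamiliar with the helper being specialized below: CFCast<> performs a checked downcast from an untyped CFTypeRef, returning null when the runtime type does not match. A rough usage sketch, assuming a caller that received some untyped |cf_value| from a CoreFoundation API (illustrative only):

CTFontRef font = base::mac::CFCast<CTFontRef>(cf_value);
if (!font) {
  // |cf_value| is not a font object; bail out or report the mismatch.
  return;
}
// Use |font| as a CTFontRef from here on.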
template<> CTFontRef CFCast<CTFontRef>(const CFTypeRef& cf_val) { if (cf_val == NULL) { diff --git a/chromium/base/mac/mac_logging.h b/chromium/base/mac/mac_logging.h index 30e43ead294..72315e9c81b 100644 --- a/chromium/base/mac/mac_logging.h +++ b/chromium/base/mac/mac_logging.h @@ -38,7 +38,7 @@ class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage { int line, LogSeverity severity, OSStatus status); - ~OSStatusLogMessage(); + ~OSStatusLogMessage() override; private: OSStatus status_; diff --git a/chromium/base/mac/mach_logging.h b/chromium/base/mac/mach_logging.h index 59ab762c3c4..c0247d21f5e 100644 --- a/chromium/base/mac/mach_logging.h +++ b/chromium/base/mac/mach_logging.h @@ -39,7 +39,7 @@ class BASE_EXPORT MachLogMessage : public logging::LogMessage { int line, LogSeverity severity, mach_error_t mach_err); - ~MachLogMessage(); + ~MachLogMessage() override; private: mach_error_t mach_err_; @@ -106,7 +106,7 @@ class BASE_EXPORT BootstrapLogMessage : public logging::LogMessage { int line, LogSeverity severity, kern_return_t bootstrap_err); - ~BootstrapLogMessage(); + ~BootstrapLogMessage() override; private: kern_return_t bootstrap_err_; diff --git a/chromium/base/mac/objc_release_properties.mm b/chromium/base/mac/objc_release_properties.mm index d0006cfe313..f9d19a4e66b 100644 --- a/chromium/base/mac/objc_release_properties.mm +++ b/chromium/base/mac/objc_release_properties.mm @@ -8,7 +8,7 @@ #include <objc/runtime.h> -#include "base/logging.h" +#include "base/check.h" #include "base/memory/free_deleter.h" namespace { diff --git a/chromium/base/mac/scoped_nsautorelease_pool.mm b/chromium/base/mac/scoped_nsautorelease_pool.mm index e542ca86b2f..48f10381bf0 100644 --- a/chromium/base/mac/scoped_nsautorelease_pool.mm +++ b/chromium/base/mac/scoped_nsautorelease_pool.mm @@ -6,7 +6,7 @@ #import <Foundation/Foundation.h> -#include "base/logging.h" +#include "base/check.h" namespace base { namespace mac { diff --git a/chromium/base/mac/scoped_objc_class_swizzler.mm b/chromium/base/mac/scoped_objc_class_swizzler.mm index 0065ed7a97f..cfb92bae221 100644 --- a/chromium/base/mac/scoped_objc_class_swizzler.mm +++ b/chromium/base/mac/scoped_objc_class_swizzler.mm @@ -6,7 +6,7 @@ #include <string.h> -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace mac { diff --git a/chromium/base/mac/scoped_sending_event.mm b/chromium/base/mac/scoped_sending_event.mm index c3813d8ae66..35a3a43c84a 100644 --- a/chromium/base/mac/scoped_sending_event.mm +++ b/chromium/base/mac/scoped_sending_event.mm @@ -4,7 +4,7 @@ #import "base/mac/scoped_sending_event.h" -#include "base/logging.h" +#include "base/check.h" namespace base { namespace mac { diff --git a/chromium/base/memory/aligned_memory.h b/chromium/base/memory/aligned_memory.h index a242b730be5..d1cba0c7bb1 100644 --- a/chromium/base/memory/aligned_memory.h +++ b/chromium/base/memory/aligned_memory.h @@ -12,6 +12,8 @@ #include "base/base_export.h" #include "base/compiler_specific.h" +#include "base/logging.h" +#include "base/process/process_metrics.h" #include "build/build_config.h" #if defined(COMPILER_MSVC) @@ -55,6 +57,32 @@ struct AlignedFreeDeleter { } }; +#ifndef __has_builtin +#define __has_builtin(x) 0 // Compatibility with non-clang compilers. +#endif + +inline bool IsAligned(uintptr_t val, size_t alignment) { + // If the compiler supports builtin alignment checks prefer them. 
+#if __has_builtin(__builtin_is_aligned) + return __builtin_is_aligned(val, alignment); +#else + DCHECK(!((alignment - 1) & alignment)) + << alignment << " is not a power of two"; + return (val & (alignment - 1)) == 0; +#endif +} + +inline bool IsAligned(void* val, size_t alignment) { + return IsAligned(reinterpret_cast<uintptr_t>(val), alignment); +} + +template <typename Type> +inline bool IsPageAligned(Type val) { + static_assert(std::is_integral<Type>::value || std::is_pointer<Type>::value, + "Integral or pointer type required"); + return base::IsAligned(val, base::GetPageSize()); +} + } // namespace base #endif // BASE_MEMORY_ALIGNED_MEMORY_H_ diff --git a/chromium/base/memory/aligned_memory_unittest.cc b/chromium/base/memory/aligned_memory_unittest.cc index e354f38b75c..e067b4cbbc2 100644 --- a/chromium/base/memory/aligned_memory_unittest.cc +++ b/chromium/base/memory/aligned_memory_unittest.cc @@ -9,30 +9,27 @@ #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" -#define EXPECT_ALIGNED(ptr, align) \ - EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1)) - namespace base { TEST(AlignedMemoryTest, DynamicAllocation) { void* p = AlignedAlloc(8, 8); EXPECT_TRUE(p); - EXPECT_ALIGNED(p, 8); + EXPECT_TRUE(IsAligned(p, 8)); AlignedFree(p); p = AlignedAlloc(8, 16); EXPECT_TRUE(p); - EXPECT_ALIGNED(p, 16); + EXPECT_TRUE(IsAligned(p, 16)); AlignedFree(p); p = AlignedAlloc(8, 256); EXPECT_TRUE(p); - EXPECT_ALIGNED(p, 256); + EXPECT_TRUE(IsAligned(p, 256)); AlignedFree(p); p = AlignedAlloc(8, 4096); EXPECT_TRUE(p); - EXPECT_ALIGNED(p, 4096); + EXPECT_TRUE(IsAligned(p, 4096)); AlignedFree(p); } @@ -40,7 +37,44 @@ TEST(AlignedMemoryTest, ScopedDynamicAllocation) { std::unique_ptr<float, AlignedFreeDeleter> p( static_cast<float*>(AlignedAlloc(8, 8))); EXPECT_TRUE(p.get()); - EXPECT_ALIGNED(p.get(), 8); + EXPECT_TRUE(IsAligned(p.get(), 8)); +} + +TEST(AlignedMemoryTest, IsAligned) { + // Check alignment around powers of two. + for (int i = 0; i < 64; ++i) { + const uint64_t n = static_cast<uint64_t>(1) << i; + + // Walk back down all lower powers of two checking alignment. + for (int j = i - 1; j >= 0; --j) { + // n is aligned on all powers of two less than or equal to 2^i. + EXPECT_TRUE(IsAligned(n, n >> j)) + << "Expected " << n << " to be " << (n >> j) << " aligned"; + + // Also, n - 1 should not be aligned on ANY lower power of two except 1 + // (but since we're starting from i - 1 we don't test that case here. + EXPECT_FALSE(IsAligned(n - 1, n >> j)) + << "Expected " << (n - 1) << " to NOT be " << (n >> j) << " aligned"; + } + } + + // And a few hard coded smoke tests for completeness: + EXPECT_TRUE(IsAligned(4, 2)); + EXPECT_TRUE(IsAligned(8, 4)); + EXPECT_TRUE(IsAligned(8, 2)); + EXPECT_TRUE(IsAligned(0x1000, 4 << 10)); + EXPECT_TRUE(IsAligned(0x2000, 8 << 10)); + EXPECT_TRUE(IsAligned(1, 1)); + EXPECT_TRUE(IsAligned(7, 1)); + EXPECT_TRUE(IsAligned(reinterpret_cast<void*>(0x1000), 4 << 10)); + EXPECT_TRUE(IsAligned(reinterpret_cast<int*>(0x1000), 4 << 10)); + + EXPECT_FALSE(IsAligned(3, 2)); + EXPECT_FALSE(IsAligned(7, 4)); + EXPECT_FALSE(IsAligned(7, 2)); + EXPECT_FALSE(IsAligned(0x1001, 4 << 10)); + EXPECT_FALSE(IsAligned(0x999, 8 << 10)); + EXPECT_FALSE(IsAligned(7, 8)); } } // namespace base diff --git a/chromium/base/memory/checked_ptr.h b/chromium/base/memory/checked_ptr.h new file mode 100644 index 00000000000..dc81e98130f --- /dev/null +++ b/chromium/base/memory/checked_ptr.h @@ -0,0 +1,274 @@ +// Copyright 2020 The Chromium Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_CHECKED_PTR_H_ +#define BASE_MEMORY_CHECKED_PTR_H_ + +#include <cstddef> +#include <cstdint> +#include <utility> + +#include "base/compiler_specific.h" + +namespace base { + +// NOTE: All methods should be ALWAYS_INLINE. CheckedPtr is meant to be a +// lightweight replacement of a raw pointer, hence performance is critical. + +namespace internal { +// These classes/structures are part of the CheckedPtr implementation. +// DO NOT USE THESE CLASSES DIRECTLY YOURSELF. + +struct CheckedPtrNoOpImpl { + // Wraps a pointer, and returns its uintptr_t representation. + static ALWAYS_INLINE uintptr_t WrapRawPtr(const void* const_ptr) { + return reinterpret_cast<uintptr_t>(const_ptr); + } + + // Returns equivalent of |WrapRawPtr(nullptr)|. Separated out to make it a + // constexpr. + static constexpr ALWAYS_INLINE uintptr_t GetWrappedNullPtr() { + // This relies on nullptr and 0 being equal in the eyes of reinterpret_cast, + // which apparently isn't true in all environments. + return 0; + } + + // Unwraps the pointer's uintptr_t representation, while asserting that memory + // hasn't been freed. The function is allowed to crash on nullptr. + static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference( + uintptr_t wrapped_ptr) { + return reinterpret_cast<void*>(wrapped_ptr); + } + + // Unwraps the pointer's uintptr_t representation, while asserting that memory + // hasn't been freed. The function must handle nullptr gracefully. + static ALWAYS_INLINE void* SafelyUnwrapPtrForExtraction( + uintptr_t wrapped_ptr) { + return reinterpret_cast<void*>(wrapped_ptr); + } + + // Unwraps the pointer's uintptr_t representation, without making an assertion + // on whether memory was freed or not. + static ALWAYS_INLINE void* UnsafelyUnwrapPtrForComparison( + uintptr_t wrapped_ptr) { + return reinterpret_cast<void*>(wrapped_ptr); + } + + // Advance the wrapped pointer by |delta| bytes. + static ALWAYS_INLINE uintptr_t Advance(uintptr_t wrapped_ptr, size_t delta) { + return wrapped_ptr + delta; + } + + // This is for accounting only, used by unit tests. + static ALWAYS_INLINE void IncrementSwapCountForTest() {} +}; + +template <typename T> +struct DereferencedPointerType { + using Type = decltype(*std::declval<T*>()); +}; +// This explicitly doesn't define any type aliases, since dereferencing void is +// invalid. +template <> +struct DereferencedPointerType<void> {}; + +} // namespace internal + +// DO NOT USE! EXPERIMENTAL ONLY! This is helpful for local testing! +// +// CheckedPtr is meant to be a pointer wrapper, that will crash on +// Use-After-Free (UaF) to prevent security issues. This is very much in the +// experimental phase. More context in: +// https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg +// +// For now, CheckedPtr is a no-op wrapper to aid local testing. +// +// Goals for this API: +// 1. Minimize amount of caller-side changes as much as physically possible. +// 2. Keep this class as small as possible, while still satisfying goal #1 (i.e. +// we aren't striving to maximize compatibility with raw pointers, merely +// adding support for cases encountered so far). +template <typename T, typename Impl = internal::CheckedPtrNoOpImpl> +class CheckedPtr { + public: + // CheckedPtr can be trivially default constructed (leaving |wrapped_ptr_| + // uninitialized). This is needed for compatibility with raw pointers. 
+ // + // TODO(lukasza): Always initialize |wrapped_ptr_|. Fix resulting build + // errors. Analyze performance impact. + constexpr CheckedPtr() noexcept = default; + + // Deliberately implicit, because CheckedPtr is supposed to resemble raw ptr. + // NOLINTNEXTLINE(runtime/explicit) + constexpr ALWAYS_INLINE CheckedPtr(std::nullptr_t) noexcept + : wrapped_ptr_(Impl::GetWrappedNullPtr()) {} + + // Deliberately implicit, because CheckedPtr is supposed to resemble raw ptr. + // NOLINTNEXTLINE(runtime/explicit) + ALWAYS_INLINE CheckedPtr(T* p) noexcept : wrapped_ptr_(Impl::WrapRawPtr(p)) {} + + // In addition to nullptr_t ctor above, CheckedPtr needs to have these + // as |=default| or |constexpr| to avoid hitting -Wglobal-constructors in + // cases like this: + // struct SomeStruct { int int_field; CheckedPtr<int> ptr_field; }; + // SomeStruct g_global_var = { 123, nullptr }; + CheckedPtr(const CheckedPtr&) noexcept = default; + CheckedPtr(CheckedPtr&&) noexcept = default; + CheckedPtr& operator=(const CheckedPtr&) noexcept = default; + CheckedPtr& operator=(CheckedPtr&&) noexcept = default; + + ALWAYS_INLINE CheckedPtr& operator=(T* p) noexcept { + wrapped_ptr_ = Impl::WrapRawPtr(p); + return *this; + } + + ~CheckedPtr() = default; + + // Avoid using. The goal of CheckedPtr is to be as close to raw pointer as + // possible, so use it only if absolutely necessary (e.g. for const_cast). + ALWAYS_INLINE T* get() const { return GetForExtraction(); } + + explicit ALWAYS_INLINE operator bool() const { + return wrapped_ptr_ != Impl::GetWrappedNullPtr(); + } + + // Use SFINAE to avoid defining |operator*| for T=void, which wouldn't compile + // due to |void&|. + template <typename U = T, + typename V = typename internal::DereferencedPointerType<U>::Type> + ALWAYS_INLINE V& operator*() const { + return *GetForDereference(); + } + ALWAYS_INLINE T* operator->() const { return GetForDereference(); } + // Deliberately implicit, because CheckedPtr is supposed to resemble raw ptr. + // NOLINTNEXTLINE(runtime/explicit) + ALWAYS_INLINE operator T*() const { return GetForExtraction(); } + template <typename U> + explicit ALWAYS_INLINE operator U*() const { + return static_cast<U*>(GetForExtraction()); + } + + ALWAYS_INLINE CheckedPtr& operator++() { + wrapped_ptr_ = Impl::Advance(wrapped_ptr_, sizeof(T)); + return *this; + } + + ALWAYS_INLINE CheckedPtr& operator--() { + wrapped_ptr_ = Impl::Advance(wrapped_ptr_, -sizeof(T)); + return *this; + } + + ALWAYS_INLINE CheckedPtr& operator+=(ptrdiff_t delta_elems) { + wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems * sizeof(T)); + return *this; + } + + ALWAYS_INLINE CheckedPtr& operator-=(ptrdiff_t delta_elems) { + return *this += -delta_elems; + } + + ALWAYS_INLINE bool operator==(T* p) const { return GetForComparison() == p; } + ALWAYS_INLINE bool operator!=(T* p) const { return !operator==(p); } + + // Useful for cases like this: + // class Base {}; + // class Derived : public Base {}; + // Derived d; + // CheckedPtr<Derived> derived_ptr = &d; + // Base* base_ptr = &d; + // if (derived_ptr == base_ptr) {...} + // Without these, such comparisons would end up calling |operator T*()|. + template <typename U> + ALWAYS_INLINE bool operator==(U* p) const { + // Add |const| when casting, because |U| may have |const| in it. Even if |T| + // doesn't, comparison between |T*| and |const T*| is fine. 
+ return GetForComparison() == static_cast<std::add_const_t<T>*>(p); + } + template <typename U> + ALWAYS_INLINE bool operator!=(U* p) const { + return !operator==(p); + } + + ALWAYS_INLINE bool operator==(const CheckedPtr& other) const { + return GetForComparison() == other.GetForComparison(); + } + ALWAYS_INLINE bool operator!=(const CheckedPtr& other) const { + return !operator==(other); + } + template <typename U, typename I> + ALWAYS_INLINE bool operator==(const CheckedPtr<U, I>& other) const { + // Add |const| when casting, because |U| may have |const| in it. Even if |T| + // doesn't, comparison between |T*| and |const T*| is fine. + return GetForComparison() == + static_cast<std::add_const_t<T>*>(other.GetForComparison()); + } + template <typename U, typename I> + ALWAYS_INLINE bool operator!=(const CheckedPtr<U, I>& other) const { + return !operator==(other); + } + + ALWAYS_INLINE void swap(CheckedPtr& other) noexcept { + Impl::IncrementSwapCountForTest(); + std::swap(wrapped_ptr_, other.wrapped_ptr_); + } + + private: + // This getter is meant for situations where the pointer is meant to be + // dereferenced. It is allowed to crash on nullptr (it may or may not), + // because it knows that the caller will crash on nullptr. + ALWAYS_INLINE T* GetForDereference() const { + return static_cast<T*>(Impl::SafelyUnwrapPtrForDereference(wrapped_ptr_)); + } + // This getter is meant for situations where the raw pointer is meant to be + // extracted outside of this class, but not necessarily with an intention to + // dereference. It mustn't crash on nullptr. + ALWAYS_INLINE T* GetForExtraction() const { + return static_cast<T*>(Impl::SafelyUnwrapPtrForExtraction(wrapped_ptr_)); + } + // This getter is meant *only* for situations where the pointer is meant to be + // compared (guaranteeing no dereference or extraction outside of this class). + // Any verifications can and should be skipped for performance reasons. + ALWAYS_INLINE T* GetForComparison() const { + return static_cast<T*>(Impl::UnsafelyUnwrapPtrForComparison(wrapped_ptr_)); + } + + // Store the pointer as |uintptr_t|, because depending on implementation, its + // unused bits may be re-purposed to store extra information. + uintptr_t wrapped_ptr_; + + template <typename U, typename V> + friend class CheckedPtr; +}; + +// These are for cases where a raw pointer is on the left hand side. Reverse +// order, so that |CheckedPtr::operator==()| kicks in, which will compare more +// efficiently. Otherwise the CheckedPtr operand would have to be cast to raw +// pointer, which may be more costly. 
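Taken together, the intent is that CheckedPtr<T> can replace a raw pointer field without touching the call sites that assign, compare, dereference, or extract it. A minimal sketch of such a call site (illustrative only; Widget, Owner, and the field name are made up for the example):

struct Widget {
  int id = 0;
};

struct Owner {
  // The field type is CheckedPtr<Widget> instead of Widget*; everything
  // below still compiles unchanged.
  CheckedPtr<Widget> widget_ = nullptr;
};

void Use(Owner& owner, Widget* raw) {
  owner.widget_ = raw;                // assign from a raw pointer
  if (owner.widget_ == raw)           // mixed comparison with a raw pointer
    owner.widget_->id = 42;           // operator-> dereference
  Widget* extracted = owner.widget_;  // implicit conversion back to T*
  (void)extracted;
}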
+template <typename T, typename I> +ALWAYS_INLINE bool operator==(T* lhs, const CheckedPtr<T, I>& rhs) { + return rhs == lhs; +} +template <typename T, typename I> +ALWAYS_INLINE bool operator!=(T* lhs, const CheckedPtr<T, I>& rhs) { + return !operator==(lhs, rhs); +} +template <typename T, typename I, typename U> +ALWAYS_INLINE bool operator==(U* lhs, const CheckedPtr<T, I>& rhs) { + return rhs == lhs; +} +template <typename T, typename I, typename U> +ALWAYS_INLINE bool operator!=(U* lhs, const CheckedPtr<T, I>& rhs) { + return !operator==(lhs, rhs); +} + +template <typename T, typename I> +ALWAYS_INLINE void swap(CheckedPtr<T, I>& lhs, CheckedPtr<T, I>& rhs) noexcept { + lhs.swap(rhs); +} + +} // namespace base + +using base::CheckedPtr; + +#endif // BASE_MEMORY_CHECKED_PTR_H_ diff --git a/chromium/base/memory/checked_ptr_unittest.cc b/chromium/base/memory/checked_ptr_unittest.cc new file mode 100644 index 00000000000..32fa63964ec --- /dev/null +++ b/chromium/base/memory/checked_ptr_unittest.cc @@ -0,0 +1,440 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/checked_ptr.h" + +#include <string> +#include <tuple> +#include <type_traits> +#include <utility> + +#include "testing/gtest/include/gtest/gtest.h" + +static_assert(sizeof(CheckedPtr<void>) == sizeof(void*), + "CheckedPtr shouldn't add memory overhead"); +static_assert(sizeof(CheckedPtr<int>) == sizeof(int*), + "CheckedPtr shouldn't add memory overhead"); +static_assert(sizeof(CheckedPtr<std::string>) == sizeof(std::string*), + "CheckedPtr shouldn't add memory overhead"); + +// |is_trivially_copyable| assertion means that arrays/vectors of CheckedPtr can +// be copied by memcpy. +static_assert(std::is_trivially_copyable<CheckedPtr<void>>::value, + "CheckedPtr should be trivially copyable"); +static_assert(std::is_trivially_copyable<CheckedPtr<int>>::value, + "CheckedPtr should be trivially copyable"); +static_assert(std::is_trivially_copyable<CheckedPtr<std::string>>::value, + "CheckedPtr should be trivially copyable"); + +// |is_trivially_default_constructible| assertion helps retain implicit default +// constructors when CheckedPtr is used as a union field. 
Example of an error +// if this assertion didn't hold: +// +// ../../base/trace_event/trace_arguments.h:249:16: error: call to +// implicitly-deleted default constructor of 'base::trace_event::TraceValue' +// TraceValue ret; +// ^ +// ../../base/trace_event/trace_arguments.h:211:26: note: default +// constructor of 'TraceValue' is implicitly deleted because variant field +// 'as_pointer' has a non-trivial default constructor +// CheckedPtr<const void> as_pointer; +static_assert(std::is_trivially_default_constructible<CheckedPtr<void>>::value, + "CheckedPtr should be trivially default constructible"); +static_assert(std::is_trivially_default_constructible<CheckedPtr<int>>::value, + "CheckedPtr should be trivially default constructible"); +static_assert( + std::is_trivially_default_constructible<CheckedPtr<std::string>>::value, + "CheckedPtr should be trivially default constructible"); + +namespace { + +static int g_get_for_dereference_cnt = INT_MIN; +static int g_get_for_extraction_cnt = INT_MIN; +static int g_get_for_comparison_cnt = INT_MIN; +static int g_checked_ptr_swap_cnt = INT_MIN; + +static void ClearCounters() { + g_get_for_dereference_cnt = 0; + g_get_for_extraction_cnt = 0; + g_get_for_comparison_cnt = 0; + g_checked_ptr_swap_cnt = 0; +} + +struct CheckedPtrCountingNoOpImpl : base::internal::CheckedPtrNoOpImpl { + using Super = base::internal::CheckedPtrNoOpImpl; + + static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference( + uintptr_t wrapped_ptr) { + ++g_get_for_dereference_cnt; + return Super::SafelyUnwrapPtrForDereference(wrapped_ptr); + } + + static ALWAYS_INLINE void* SafelyUnwrapPtrForExtraction( + uintptr_t wrapped_ptr) { + ++g_get_for_extraction_cnt; + return Super::SafelyUnwrapPtrForExtraction(wrapped_ptr); + } + + static ALWAYS_INLINE void* UnsafelyUnwrapPtrForComparison( + uintptr_t wrapped_ptr) { + ++g_get_for_comparison_cnt; + return Super::UnsafelyUnwrapPtrForComparison(wrapped_ptr); + } + + static ALWAYS_INLINE void IncrementSwapCountForTest() { + ++g_checked_ptr_swap_cnt; + } +}; + +template <typename T> +using CountingCheckedPtr = CheckedPtr<T, CheckedPtrCountingNoOpImpl>; + +struct MyStruct { + int x; +}; + +struct Base1 { + explicit Base1(int b1) : b1(b1) {} + int b1; +}; + +struct Base2 { + explicit Base2(int b2) : b2(b2) {} + int b2; +}; + +struct Derived : Base1, Base2 { + Derived(int b1, int b2, int d) : Base1(b1), Base2(b2), d(d) {} + int d; +}; + +TEST(CheckedPtr, NullStarDereference) { + CheckedPtr<int> ptr = nullptr; + EXPECT_DEATH_IF_SUPPORTED(if (*ptr == 42) return, ""); +} + +TEST(CheckedPtr, NullArrowDereference) { + CheckedPtr<MyStruct> ptr = nullptr; + EXPECT_DEATH_IF_SUPPORTED(if (ptr->x == 42) return, ""); +} + +TEST(CheckedPtr, NullExtractNoDereference) { + CheckedPtr<int> ptr = nullptr; + int* raw = ptr; + std::ignore = raw; +} + +TEST(CheckedPtr, StarDereference) { + int foo = 42; + CheckedPtr<int> ptr = &foo; + EXPECT_EQ(*ptr, 42); +} + +TEST(CheckedPtr, ArrowDereference) { + MyStruct foo = {42}; + CheckedPtr<MyStruct> ptr = &foo; + EXPECT_EQ(ptr->x, 42); +} + +TEST(CheckedPtr, ConstVoidPtr) { + int32_t foo[] = {1234567890}; + CheckedPtr<const void> ptr = foo; + EXPECT_EQ(*static_cast<const int32_t*>(ptr), 1234567890); +} + +TEST(CheckedPtr, VoidPtr) { + int32_t foo[] = {1234567890}; + CheckedPtr<void> ptr = foo; + EXPECT_EQ(*static_cast<int32_t*>(ptr), 1234567890); +} + +TEST(CheckedPtr, OperatorEQ) { + int foo; + CheckedPtr<int> ptr1 = nullptr; + EXPECT_TRUE(ptr1 == ptr1); + + CheckedPtr<int> ptr2 = nullptr; + EXPECT_TRUE(ptr1 == ptr2); + 
+ CheckedPtr<int> ptr3 = &foo; + EXPECT_TRUE(&foo == ptr3); + EXPECT_TRUE(ptr3 == &foo); + EXPECT_FALSE(ptr1 == ptr3); + + ptr1 = &foo; + EXPECT_TRUE(ptr1 == ptr3); + EXPECT_TRUE(ptr3 == ptr1); +} + +TEST(CheckedPtr, OperatorNE) { + int foo; + CheckedPtr<int> ptr1 = nullptr; + EXPECT_FALSE(ptr1 != ptr1); + + CheckedPtr<int> ptr2 = nullptr; + EXPECT_FALSE(ptr1 != ptr2); + + CheckedPtr<int> ptr3 = &foo; + EXPECT_FALSE(&foo != ptr3); + EXPECT_FALSE(ptr3 != &foo); + EXPECT_TRUE(ptr1 != ptr3); + + ptr1 = &foo; + EXPECT_FALSE(ptr1 != ptr3); + EXPECT_FALSE(ptr3 != ptr1); +} + +TEST(CheckedPtr, OperatorEQCast) { + ClearCounters(); + int foo = 42; + const int* raw_int_ptr = &foo; + void* raw_void_ptr = &foo; + CountingCheckedPtr<int> checked_int_ptr = &foo; + CountingCheckedPtr<const void> checked_void_ptr = &foo; + EXPECT_TRUE(checked_int_ptr == checked_int_ptr); + EXPECT_TRUE(checked_int_ptr == raw_int_ptr); + EXPECT_TRUE(raw_int_ptr == checked_int_ptr); + EXPECT_TRUE(checked_void_ptr == checked_void_ptr); + EXPECT_TRUE(checked_void_ptr == raw_void_ptr); + EXPECT_TRUE(raw_void_ptr == checked_void_ptr); + EXPECT_TRUE(checked_int_ptr == checked_void_ptr); + EXPECT_TRUE(checked_int_ptr == raw_void_ptr); + EXPECT_TRUE(raw_int_ptr == checked_void_ptr); + EXPECT_TRUE(checked_void_ptr == checked_int_ptr); + EXPECT_TRUE(checked_void_ptr == raw_int_ptr); + EXPECT_TRUE(raw_void_ptr == checked_int_ptr); + // Make sure that all cases are handled by operator== (faster) and none by the + // cast operator (slower). + EXPECT_EQ(g_get_for_comparison_cnt, 16); + EXPECT_EQ(g_get_for_extraction_cnt, 0); + EXPECT_EQ(g_get_for_dereference_cnt, 0); + + ClearCounters(); + Derived derived_val(42, 84, 1024); + Derived* raw_derived_ptr = &derived_val; + const Base1* raw_base1_ptr = &derived_val; + Base2* raw_base2_ptr = &derived_val; + CountingCheckedPtr<const Derived> checked_derived_ptr = &derived_val; + CountingCheckedPtr<Base1> checked_base1_ptr = &derived_val; + CountingCheckedPtr<const Base2> checked_base2_ptr = &derived_val; + EXPECT_TRUE(checked_derived_ptr == checked_derived_ptr); + EXPECT_TRUE(checked_derived_ptr == raw_derived_ptr); + EXPECT_TRUE(raw_derived_ptr == checked_derived_ptr); + EXPECT_TRUE(checked_derived_ptr == checked_base1_ptr); + EXPECT_TRUE(checked_derived_ptr == raw_base1_ptr); + EXPECT_TRUE(raw_derived_ptr == checked_base1_ptr); + EXPECT_TRUE(checked_base1_ptr == checked_derived_ptr); + EXPECT_TRUE(checked_base1_ptr == raw_derived_ptr); + EXPECT_TRUE(raw_base1_ptr == checked_derived_ptr); + // |base2_ptr| points to the second base class of |derived|, so will be + // located at an offset. While the stored raw uinptr_t values shouldn't match, + // ensure that the internal pointer manipulation correctly offsets when + // casting up and down the class hierarchy. 
+ EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()), + reinterpret_cast<uintptr_t>(checked_derived_ptr.get())); + EXPECT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr), + reinterpret_cast<uintptr_t>(checked_derived_ptr.get())); + EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()), + reinterpret_cast<uintptr_t>(raw_derived_ptr)); + EXPECT_TRUE(checked_derived_ptr == checked_base2_ptr); + EXPECT_TRUE(checked_derived_ptr == raw_base2_ptr); + EXPECT_TRUE(raw_derived_ptr == checked_base2_ptr); + EXPECT_TRUE(checked_base2_ptr == checked_derived_ptr); + EXPECT_TRUE(checked_base2_ptr == raw_derived_ptr); + EXPECT_TRUE(raw_base2_ptr == checked_derived_ptr); + // Make sure that all cases are handled by operator== (faster) and none by the + // cast operator (slower). + // The 4 extractions come from .get() checks, that compare raw addresses. + EXPECT_EQ(g_get_for_comparison_cnt, 20); + EXPECT_EQ(g_get_for_extraction_cnt, 4); + EXPECT_EQ(g_get_for_dereference_cnt, 0); +} + +TEST(CheckedPtr, OperatorNECast) { + ClearCounters(); + int foo = 42; + int* raw_int_ptr = &foo; + const void* raw_void_ptr = &foo; + CountingCheckedPtr<const int> checked_int_ptr = &foo; + CountingCheckedPtr<void> checked_void_ptr = &foo; + EXPECT_FALSE(checked_int_ptr != checked_int_ptr); + EXPECT_FALSE(checked_int_ptr != raw_int_ptr); + EXPECT_FALSE(raw_int_ptr != checked_int_ptr); + EXPECT_FALSE(checked_void_ptr != checked_void_ptr); + EXPECT_FALSE(checked_void_ptr != raw_void_ptr); + EXPECT_FALSE(raw_void_ptr != checked_void_ptr); + EXPECT_FALSE(checked_int_ptr != checked_void_ptr); + EXPECT_FALSE(checked_int_ptr != raw_void_ptr); + EXPECT_FALSE(raw_int_ptr != checked_void_ptr); + EXPECT_FALSE(checked_void_ptr != checked_int_ptr); + EXPECT_FALSE(checked_void_ptr != raw_int_ptr); + EXPECT_FALSE(raw_void_ptr != checked_int_ptr); + // Make sure that all cases are handled by operator== (faster) and none by the + // cast operator (slower). + EXPECT_EQ(g_get_for_comparison_cnt, 16); + EXPECT_EQ(g_get_for_extraction_cnt, 0); + EXPECT_EQ(g_get_for_dereference_cnt, 0); + + ClearCounters(); + Derived derived_val(42, 84, 1024); + const Derived* raw_derived_ptr = &derived_val; + Base1* raw_base1_ptr = &derived_val; + const Base2* raw_base2_ptr = &derived_val; + CountingCheckedPtr<Derived> checked_derived_ptr = &derived_val; + CountingCheckedPtr<const Base1> checked_base1_ptr = &derived_val; + CountingCheckedPtr<Base2> checked_base2_ptr = &derived_val; + EXPECT_FALSE(checked_derived_ptr != checked_derived_ptr); + EXPECT_FALSE(checked_derived_ptr != raw_derived_ptr); + EXPECT_FALSE(raw_derived_ptr != checked_derived_ptr); + EXPECT_FALSE(checked_derived_ptr != checked_base1_ptr); + EXPECT_FALSE(checked_derived_ptr != raw_base1_ptr); + EXPECT_FALSE(raw_derived_ptr != checked_base1_ptr); + EXPECT_FALSE(checked_base1_ptr != checked_derived_ptr); + EXPECT_FALSE(checked_base1_ptr != raw_derived_ptr); + EXPECT_FALSE(raw_base1_ptr != checked_derived_ptr); + // |base2_ptr| points to the second base class of |derived|, so will be + // located at an offset. While the stored raw uinptr_t values shouldn't match, + // ensure that the internal pointer manipulation correctly offsets when + // casting up and down the class hierarchy. 
+ EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()), + reinterpret_cast<uintptr_t>(checked_derived_ptr.get())); + EXPECT_NE(reinterpret_cast<uintptr_t>(raw_base2_ptr), + reinterpret_cast<uintptr_t>(checked_derived_ptr.get())); + EXPECT_NE(reinterpret_cast<uintptr_t>(checked_base2_ptr.get()), + reinterpret_cast<uintptr_t>(raw_derived_ptr)); + EXPECT_FALSE(checked_derived_ptr != checked_base2_ptr); + EXPECT_FALSE(checked_derived_ptr != raw_base2_ptr); + EXPECT_FALSE(raw_derived_ptr != checked_base2_ptr); + EXPECT_FALSE(checked_base2_ptr != checked_derived_ptr); + EXPECT_FALSE(checked_base2_ptr != raw_derived_ptr); + EXPECT_FALSE(raw_base2_ptr != checked_derived_ptr); + // Make sure that all cases are handled by operator== (faster) and none by the + // cast operator (slower). + // The 4 extractions come from .get() checks, that compare raw addresses. + EXPECT_EQ(g_get_for_comparison_cnt, 20); + EXPECT_EQ(g_get_for_extraction_cnt, 4); + EXPECT_EQ(g_get_for_dereference_cnt, 0); +} + +TEST(CheckedPtr, Cast) { + Derived derived_val(42, 84, 1024); + CheckedPtr<Derived> checked_derived_ptr = &derived_val; + Base1* raw_base1_ptr = checked_derived_ptr; + EXPECT_EQ(raw_base1_ptr->b1, 42); + Base2* raw_base2_ptr = checked_derived_ptr; + EXPECT_EQ(raw_base2_ptr->b2, 84); + + Derived* raw_derived_ptr = static_cast<Derived*>(raw_base1_ptr); + EXPECT_EQ(raw_derived_ptr->b1, 42); + EXPECT_EQ(raw_derived_ptr->b2, 84); + EXPECT_EQ(raw_derived_ptr->d, 1024); + raw_derived_ptr = static_cast<Derived*>(raw_base2_ptr); + EXPECT_EQ(raw_derived_ptr->b1, 42); + EXPECT_EQ(raw_derived_ptr->b2, 84); + EXPECT_EQ(raw_derived_ptr->d, 1024); + + CheckedPtr<Base1> checked_base1_ptr = raw_derived_ptr; + EXPECT_EQ(checked_base1_ptr->b1, 42); + CheckedPtr<Base2> checked_base2_ptr = raw_derived_ptr; + EXPECT_EQ(checked_base2_ptr->b2, 84); + + CheckedPtr<Derived> checked_derived_ptr2 = + static_cast<Derived*>(checked_base1_ptr); + EXPECT_EQ(checked_derived_ptr2->b1, 42); + EXPECT_EQ(checked_derived_ptr2->b2, 84); + EXPECT_EQ(checked_derived_ptr2->d, 1024); + checked_derived_ptr2 = static_cast<Derived*>(checked_base2_ptr); + EXPECT_EQ(checked_derived_ptr2->b1, 42); + EXPECT_EQ(checked_derived_ptr2->b2, 84); + EXPECT_EQ(checked_derived_ptr2->d, 1024); + + const Derived* raw_const_derived_ptr = checked_derived_ptr2; + EXPECT_EQ(raw_const_derived_ptr->b1, 42); + EXPECT_EQ(raw_const_derived_ptr->b2, 84); + EXPECT_EQ(raw_const_derived_ptr->d, 1024); + + CheckedPtr<const Derived> checked_const_derived_ptr = raw_const_derived_ptr; + EXPECT_EQ(checked_const_derived_ptr->b1, 42); + EXPECT_EQ(checked_const_derived_ptr->b2, 84); + EXPECT_EQ(checked_const_derived_ptr->d, 1024); + + void* raw_void_ptr = checked_derived_ptr; + CheckedPtr<void> checked_void_ptr = raw_derived_ptr; + CheckedPtr<Derived> checked_derived_ptr3 = + static_cast<Derived*>(raw_void_ptr); + CheckedPtr<Derived> checked_derived_ptr4 = + static_cast<Derived*>(checked_void_ptr); + EXPECT_EQ(checked_derived_ptr3->b1, 42); + EXPECT_EQ(checked_derived_ptr3->b2, 84); + EXPECT_EQ(checked_derived_ptr3->d, 1024); + EXPECT_EQ(checked_derived_ptr4->b1, 42); + EXPECT_EQ(checked_derived_ptr4->b2, 84); + EXPECT_EQ(checked_derived_ptr4->d, 1024); +} + +TEST(CheckedPtr, CustomSwap) { + ClearCounters(); + int foo1, foo2; + CountingCheckedPtr<int> ptr1(&foo1); + CountingCheckedPtr<int> ptr2(&foo2); + // Recommended use pattern. 
+ using std::swap; + swap(ptr1, ptr2); + EXPECT_EQ(ptr1.get(), &foo2); + EXPECT_EQ(ptr2.get(), &foo1); + EXPECT_EQ(g_checked_ptr_swap_cnt, 1); +} + +TEST(CheckedPtr, StdSwap) { + ClearCounters(); + int foo1, foo2; + CountingCheckedPtr<int> ptr1(&foo1); + CountingCheckedPtr<int> ptr2(&foo2); + std::swap(ptr1, ptr2); + EXPECT_EQ(ptr1.get(), &foo2); + EXPECT_EQ(ptr2.get(), &foo1); + EXPECT_EQ(g_checked_ptr_swap_cnt, 0); +} + +TEST(CheckedPtr, AdvanceIntArray) { + // operator++ + int foo[] = {42, 43, 44, 45}; + CheckedPtr<int> ptr = foo; + for (int i = 0; i < 4; ++i, ++ptr) { + ASSERT_EQ(*ptr, 42 + i); + } + ptr = &foo[1]; + for (int i = 1; i < 4; ++i, ++ptr) { + ASSERT_EQ(*ptr, 42 + i); + } + + // operator-- + ptr = &foo[3]; + for (int i = 3; i >= 0; --i, --ptr) { + ASSERT_EQ(*ptr, 42 + i); + } + + // operator+= + ptr = foo; + for (int i = 0; i < 4; i += 2, ptr += 2) { + ASSERT_EQ(*ptr, 42 + i); + } + + // operator-= + ptr = &foo[3]; + for (int i = 3; i >= 0; i -= 2, ptr -= 2) { + ASSERT_EQ(*ptr, 42 + i); + } +} + +TEST(CheckedPtr, AdvanceString) { + const char kChars[] = "Hello"; + std::string str = kChars; + CheckedPtr<const char> ptr = str.c_str(); + for (size_t i = 0; i < str.size(); ++i, ++ptr) { + ASSERT_EQ(*ptr, kChars[i]); + } +} + +} // namespace diff --git a/chromium/base/memory/checked_ptr_unittest.nc b/chromium/base/memory/checked_ptr_unittest.nc new file mode 100644 index 00000000000..c19b47796e4 --- /dev/null +++ b/chromium/base/memory/checked_ptr_unittest.nc @@ -0,0 +1,86 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This is a "No Compile Test" suite. +// http://dev.chromium.org/developers/testing/no-compile-tests + +#include <tuple> // for std::ignore + +#include "base/memory/checked_ptr.h" + +namespace { + +struct Producer {}; +struct DerivedProducer : Producer {}; +struct OtherDerivedProducer : Producer {}; +struct Unrelated {}; +struct DerivedUnrelated : Unrelated {}; + +#if defined(NCTEST_AUTO_DOWNCAST) // [r"no viable conversion from 'CheckedPtr<\(anonymous namespace\)::Producer>' to 'CheckedPtr<\(anonymous namespace\)::DerivedProducer>'"] + +void WontCompile() { + Producer f; + CheckedPtr<Producer> ptr = &f; + CheckedPtr<DerivedProducer> derived_ptr = ptr; +} + +#elif defined(NCTEST_STATIC_DOWNCAST) // [r"no matching conversion for static_cast from 'CheckedPtr<\(anonymous namespace\)::Producer>' to 'CheckedPtr<\(anonymous namespace\)::DerivedProducer>'"] + +void WontCompile() { + Producer f; + CheckedPtr<Producer> ptr = &f; + CheckedPtr<DerivedProducer> derived_ptr = + static_cast<CheckedPtr<DerivedProducer>>(ptr); +} + +#elif defined(NCTEST_AUTO_REF_DOWNCAST) // [r"non-const lvalue reference to type 'CheckedPtr<\(anonymous namespace\)::DerivedProducer>' cannot bind to a value of unrelated type 'CheckedPtr<\(anonymous namespace\)::Producer>'"] + +void WontCompile() { + Producer f; + CheckedPtr<Producer> ptr = &f; + CheckedPtr<DerivedProducer>& derived_ptr = ptr; +} + +#elif defined(NCTEST_STATIC_REF_DOWNCAST) // [r"non-const lvalue reference to type 'CheckedPtr<\(anonymous namespace\)::DerivedProducer>' cannot bind to a value of unrelated type 'CheckedPtr<\(anonymous namespace\)::Producer>'"] + +void WontCompile() { + Producer f; + CheckedPtr<Producer> ptr = &f; + CheckedPtr<DerivedProducer>& derived_ptr = + static_cast<CheckedPtr<DerivedProducer>&>(ptr); +} + +#elif defined(NCTEST_AUTO_DOWNCAST_FROM_RAW) // [r"no viable conversion from 
'\(anonymous namespace\)::Producer \*' to 'CheckedPtr<\(anonymous namespace\)::DerivedProducer>'"] + +void WontCompile() { + Producer f; + CheckedPtr<DerivedProducer> ptr = &f; +} + +#elif defined(NCTEST_UNRELATED_FROM_RAW) // [r"no viable conversion from '\(anonymous namespace\)::DerivedProducer \*' to 'CheckedPtr<\(anonymous namespace\)::Unrelated>'"] + +void WontCompile() { + DerivedProducer f; + CheckedPtr<Unrelated> ptr = &f; +} + +#elif defined(NCTEST_UNRELATED_STATIC_FROM_WRAPPED) // [r"static_cast from '\(anonymous namespace\)::DerivedProducer \*' to '\(anonymous namespace\)::Unrelated \*', which are not related by inheritance, is not allowed"] + +void WontCompile() { + DerivedProducer f; + CheckedPtr<DerivedProducer> ptr = &f; + std::ignore = static_cast<Unrelated*>(ptr); +} + +#elif defined(NCTEST_VOID_DEREFERENCE) // [r"ISO C\+\+ does not allow indirection on operand of type 'const void \*' \[-Wvoid-ptr-dereference\]"] + +void WontCompile() { + const char foo[] = "42"; + CheckedPtr<const void> ptr = foo; + std::ignore = *ptr; +} + +#endif + +} // namespace diff --git a/chromium/base/memory/discardable_memory_allocator.cc b/chromium/base/memory/discardable_memory_allocator.cc index ac6a3ba48e2..ecd0ed03b15 100644 --- a/chromium/base/memory/discardable_memory_allocator.cc +++ b/chromium/base/memory/discardable_memory_allocator.cc @@ -6,7 +6,7 @@ #include <utility> -#include "base/logging.h" +#include "base/check.h" #include "base/process/memory.h" namespace base { diff --git a/chromium/base/memory/memory_pressure_monitor.cc b/chromium/base/memory/memory_pressure_monitor.cc index bab52f37553..7624c4557db 100644 --- a/chromium/base/memory/memory_pressure_monitor.cc +++ b/chromium/base/memory/memory_pressure_monitor.cc @@ -4,8 +4,9 @@ #include "base/memory/memory_pressure_monitor.h" -#include "base/logging.h" +#include "base/check.h" #include "base/metrics/histogram_macros.h" +#include "base/notreached.h" namespace base { namespace { diff --git a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc index aa858d7dcbe..79c6f6ebc1e 100644 --- a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc +++ b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc @@ -162,13 +162,15 @@ bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode( zx_info_handle_basic_t basic = {}; zx_status_t status = handle->get_info(ZX_INFO_HANDLE_BASIC, &basic, sizeof(basic), nullptr, nullptr); - if (status != ZX_OK) { + ZX_CHECK(status == ZX_OK, status) << "zx_object_get_info"; + + if (basic.type != ZX_OBJ_TYPE_VMO) { // TODO(crbug.com/838365): convert to DLOG when bug fixed. 
- ZX_LOG(ERROR, status) << "zx_object_get_info"; + LOG(ERROR) << "Received zircon handle is not a VMO"; return false; } - bool is_read_only = (basic.rights & kNoWriteOrExec) == basic.rights; + bool is_read_only = (basic.rights & (ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE)) == 0; bool expected_read_only = mode == Mode::kReadOnly; if (is_read_only != expected_read_only) { diff --git a/chromium/base/memory/platform_shared_memory_region_posix.cc b/chromium/base/memory/platform_shared_memory_region_posix.cc index 61ee2029a26..08eec31be31 100644 --- a/chromium/base/memory/platform_shared_memory_region_posix.cc +++ b/chromium/base/memory/platform_shared_memory_region_posix.cc @@ -9,6 +9,7 @@ #include "base/files/file.h" #include "base/files/file_util.h" +#include "base/logging.h" #include "base/metrics/histogram_macros.h" #include "base/threading/thread_restrictions.h" #include "build/build_config.h" diff --git a/chromium/base/memory/platform_shared_memory_region_unittest.cc b/chromium/base/memory/platform_shared_memory_region_unittest.cc index fbf5a216863..10e8fe0db65 100644 --- a/chromium/base/memory/platform_shared_memory_region_unittest.cc +++ b/chromium/base/memory/platform_shared_memory_region_unittest.cc @@ -4,7 +4,7 @@ #include "base/memory/platform_shared_memory_region.h" -#include "base/logging.h" +#include "base/check.h" #include "base/memory/shared_memory_mapping.h" #include "base/process/process_metrics.h" #include "base/system/sys_info.h" diff --git a/chromium/base/memory/platform_shared_memory_region_win.cc b/chromium/base/memory/platform_shared_memory_region_win.cc index 53d2a2fdc59..c58731abc3b 100644 --- a/chromium/base/memory/platform_shared_memory_region_win.cc +++ b/chromium/base/memory/platform_shared_memory_region_win.cc @@ -10,6 +10,7 @@ #include "base/allocator/partition_allocator/page_allocator.h" #include "base/bits.h" +#include "base/logging.h" #include "base/metrics/histogram_functions.h" #include "base/metrics/histogram_macros.h" #include "base/process/process_handle.h" diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h index ac7183a49de..4ef63e85794 100644 --- a/chromium/base/memory/ref_counted.h +++ b/chromium/base/memory/ref_counted.h @@ -49,10 +49,6 @@ class BASE_EXPORT RefCountedBase { } void AddRef() const { - // TODO(maruel): Add back once it doesn't assert 500 times/sec. - // Current thread books the critical section "AddRelease" - // without release it. - // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_); #if DCHECK_IS_ON() DCHECK(!in_dtor_); DCHECK(!needs_adopt_ref_) @@ -71,11 +67,6 @@ class BASE_EXPORT RefCountedBase { bool Release() const { ReleaseImpl(); - // TODO(maruel): Add back once it doesn't assert 500 times/sec. - // Current thread books the critical section "AddRelease" - // without release it. - // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_); - #if DCHECK_IS_ON() DCHECK(!in_dtor_); if (ref_count_ == 0) @@ -277,9 +268,11 @@ class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final { // ~MyFoo(); // }; // -// You should always make your destructor non-public, to avoid any code deleting -// the object accidently while there are references to it. -// +// Usage Notes: +// 1. You should always make your destructor non-public, to avoid any code +// deleting the object accidentally while there are references to it. +// 2. You should always make the ref-counted base class a friend of your class, +// so that it can access the destructor. 
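Expanding the MyFoo example from the header comment, a minimal sketch of what the two usage notes amount to in practice (the class body here is illustrative, not part of the patch):

    #include "base/memory/ref_counted.h"

    class MyFoo : public base::RefCounted<MyFoo> {
     public:
      MyFoo() = default;

     private:
      // Usage note 2: let the ref-counted base reach the private destructor.
      friend class base::RefCounted<MyFoo>;
      // Usage note 1: a non-public destructor, so nothing can delete the
      // object directly while references may still exist; it is destroyed
      // only when the last reference is released.
      ~MyFoo() = default;
    };

    // Typical use: scoped_refptr drives AddRef()/Release().
    //   scoped_refptr<MyFoo> foo = base::MakeRefCounted<MyFoo>();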
// // The ref count manipulation to RefCounted is NOT thread safe and has DCHECKs // to trap unsafe cross thread usage. A subclass instance of RefCounted can be diff --git a/chromium/base/memory/ref_counted_memory.cc b/chromium/base/memory/ref_counted_memory.cc index fa140157f86..76ce768b754 100644 --- a/chromium/base/memory/ref_counted_memory.cc +++ b/chromium/base/memory/ref_counted_memory.cc @@ -6,7 +6,7 @@ #include <utility> -#include "base/logging.h" +#include "base/check_op.h" #include "base/memory/read_only_shared_memory_region.h" namespace base { diff --git a/chromium/base/memory/ref_counted_memory.h b/chromium/base/memory/ref_counted_memory.h index ab5fb4adf53..81ad49d74e9 100644 --- a/chromium/base/memory/ref_counted_memory.h +++ b/chromium/base/memory/ref_counted_memory.h @@ -43,7 +43,7 @@ class BASE_EXPORT RefCountedMemory // Alias for front() to make it possible for RefCountedMemory to implicitly // convert to span. - const unsigned char* data() { return front(); } + const unsigned char* data() const { return front(); } protected: friend class RefCountedThreadSafe<RefCountedMemory>; diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc index 7e0b9c5520d..d4b68bed9fb 100644 --- a/chromium/base/message_loop/message_loop.cc +++ b/chromium/base/message_loop/message_loop.cc @@ -7,7 +7,7 @@ #include <utility> #include "base/bind.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/memory/ptr_util.h" #include "base/message_loop/message_pump_default.h" #include "base/message_loop/message_pump_for_io.h" diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc index 129f6b75d4e..1d0ed42b239 100644 --- a/chromium/base/message_loop/message_loop_unittest.cc +++ b/chromium/base/message_loop/message_loop_unittest.cc @@ -387,7 +387,7 @@ class TestIOHandler : public MessagePumpForIO::IOHandler { }; TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait) - : signal_(signal), wait_(wait) { + : MessagePumpForIO::IOHandler(FROM_HERE), signal_(signal), wait_(wait) { memset(buffer_, 0, sizeof(buffer_)); file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING, @@ -1615,6 +1615,54 @@ TEST_F(MessageLoopTest, PostImmediateTaskFromSystemPump) { // https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125 This // is the delayed task equivalent of the above PostImmediateTaskFromSystemPump // test. +// +// As a reminder of how this works, here's the sequence of events in this test: +// 1) Test start: +// work_deduplicator.cc(24): BindToCurrentThread +// work_deduplicator.cc(34): OnWorkRequested +// thread_controller_with_message_pump_impl.cc(237) : DoWork +// work_deduplicator.cc(50): OnWorkStarted +// 2) SubPumpFunc entered: +// message_loop_unittest.cc(278): SubPumpFunc +// 3) ScopedNestableTaskAllower triggers nested ScheduleWork: +// work_deduplicator.cc(34): OnWorkRequested +// 4) Nested system loop starts and pumps internal kMsgHaveWork: +// message_loop_unittest.cc(282): SubPumpFunc : Got Message +// message_pump_win.cc(302): HandleWorkMessage +// thread_controller_with_message_pump_impl.cc(237) : DoWork +// 5) Attempt to DoWork(), there's nothing to do, NextWorkInfo indicates delay. 
+// work_deduplicator.cc(50): OnWorkStarted +// work_deduplicator.cc(58): WillCheckForMoreWork +// work_deduplicator.cc(67): DidCheckForMoreWork +// 6) Return control to HandleWorkMessage() which schedules native timer +// and goes to sleep (no kMsgHaveWork in native queue). +// message_pump_win.cc(328): HandleWorkMessage ScheduleNativeTimer +// 7) Native timer fires and posts the delayed application task: +// message_loop_unittest.cc(282): SubPumpFunc : Got Message +// message_loop_unittest.cc(1581): DelayedQuitOnSystemTimer +// !! This is the critical step verified by this test. Since the +// ThreadController is idle after (6), it won't be invoked again and thus +// won't get a chance to return a NextWorkInfo that indicates the next +// delay. A native timer is thus required to have SubPumpFunc handle it. +// work_deduplicator.cc(42): OnDelayedWorkRequested +// message_pump_win.cc(129): ScheduleDelayedWork +// 9) The scheduled native timer fires and runs application task binding +// ::PostQuitMessage : +// message_loop_unittest.cc(282) SubPumpFunc : Got Message +// work_deduplicator.cc(50): OnWorkStarted +// thread_controller_with_message_pump_impl.cc(237) : DoWork +// 10) SequenceManager updates delay to none and notifies +// (TODO(scheduler-dev): Could remove this step but WorkDeduplicator knows +// to ignore at least): +// work_deduplicator.cc(42): OnDelayedWorkRequested +// 11) Nested application task completes and SubPumpFunc unwinds: +// work_deduplicator.cc(58): WillCheckForMoreWork +// work_deduplicator.cc(67): DidCheckForMoreWork +// 12) ~ScopedNestableTaskAllower() makes sure WorkDeduplicator knows we're +// back in DoWork() (not relevant in this test but important overall). +// work_deduplicator.cc(50): OnWorkStarted +// 13) Application task which ran SubPumpFunc completes and test finishes. 
+// work_deduplicator.cc(67): DidCheckForMoreWork TEST_F(MessageLoopTest, PostDelayedTaskFromSystemPump) { MessageLoop message_loop(MessagePumpType::UI); diff --git a/chromium/base/message_loop/message_pump.cc b/chromium/base/message_loop/message_pump.cc index 877ba35cc4c..81ba252ee3e 100644 --- a/chromium/base/message_loop/message_pump.cc +++ b/chromium/base/message_loop/message_pump.cc @@ -4,9 +4,11 @@ #include "base/message_loop/message_pump.h" +#include "base/check.h" #include "base/message_loop/message_pump_default.h" #include "base/message_loop/message_pump_for_io.h" #include "base/message_loop/message_pump_for_ui.h" +#include "base/notreached.h" #if defined(OS_MACOSX) #include "base/message_loop/message_pump_mac.h" diff --git a/chromium/base/message_loop/message_pump_android.cc b/chromium/base/message_loop/message_pump_android.cc index 9b514918228..85a25deab2f 100644 --- a/chromium/base/message_loop/message_pump_android.cc +++ b/chromium/base/message_loop/message_pump_android.cc @@ -17,8 +17,9 @@ #include "base/android/jni_android.h" #include "base/android/scoped_java_ref.h" #include "base/callback_helpers.h" +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/run_loop.h" #include "build/build_config.h" diff --git a/chromium/base/message_loop/message_pump_fuchsia.cc b/chromium/base/message_loop/message_pump_fuchsia.cc index c4a15598e14..1c575681064 100644 --- a/chromium/base/message_loop/message_pump_fuchsia.cc +++ b/chromium/base/message_loop/message_pump_fuchsia.cc @@ -60,8 +60,7 @@ bool MessagePumpFuchsia::ZxHandleWatchController::StopWatchingZxHandle() { if (!weak_pump_) return true; - // |handler| is set when waiting for a signal. - if (!handler) + if (!is_active()) return true; async_wait_t::handler = nullptr; @@ -150,7 +149,7 @@ MessagePumpFuchsia::FdWatchController::~FdWatchController() { } bool MessagePumpFuchsia::FdWatchController::WaitBegin() { - // Refresh the |handle_| and |desired_signals_| from the mxio for the fd. + // Refresh the |handle_| and |desired_signals_| from the fdio for the fd. // Some types of fdio map read/write events to different signals depending on // their current state, so we must do this every time we begin to wait. fdio_unsafe_wait_begin(io_, desired_events_, &object, &trigger); @@ -229,8 +228,10 @@ bool MessagePumpFuchsia::WatchZxHandle(zx_handle_t handle, DCHECK_NE(0u, signals); DCHECK(controller); DCHECK(delegate); - DCHECK(handle == ZX_HANDLE_INVALID || - controller->async_wait_t::object == ZX_HANDLE_INVALID || + + // If the watch controller is active then WatchZxHandle() can be called only + // for the same handle. 
+ DCHECK(handle == ZX_HANDLE_INVALID || !controller->is_active() || handle == controller->async_wait_t::object); if (!controller->StopWatchingZxHandle()) diff --git a/chromium/base/message_loop/message_pump_fuchsia.h b/chromium/base/message_loop/message_pump_fuchsia.h index 2b21b53205f..d494a4f0d7e 100644 --- a/chromium/base/message_loop/message_pump_fuchsia.h +++ b/chromium/base/message_loop/message_pump_fuchsia.h @@ -53,6 +53,8 @@ class BASE_EXPORT MessagePumpFuchsia : public MessagePump, virtual bool WaitBegin(); + bool is_active() const { return async_wait_t::handler != nullptr; } + static void HandleSignal(async_dispatcher_t* async, async_wait_t* wait, zx_status_t status, @@ -105,7 +107,8 @@ class BASE_EXPORT MessagePumpFuchsia : public MessagePump, int fd_ = -1; uint32_t desired_events_ = 0; - // Set by WatchFileDescriptor to hold a reference to the descriptor's mxio. + // Set by WatchFileDescriptor() to hold a reference to the descriptor's + // fdio. fdio_t* io_ = nullptr; DISALLOW_COPY_AND_ASSIGN(FdWatchController); diff --git a/chromium/base/message_loop/message_pump_mac.mm b/chromium/base/message_loop/message_pump_mac.mm index bb9e6d30bed..cbda789a0d2 100644 --- a/chromium/base/message_loop/message_pump_mac.mm +++ b/chromium/base/message_loop/message_pump_mac.mm @@ -10,11 +10,12 @@ #include <memory> #include "base/auto_reset.h" +#include "base/check_op.h" #include "base/feature_list.h" -#include "base/logging.h" #include "base/mac/call_with_eh_frame.h" #include "base/mac/scoped_cftyperef.h" #include "base/message_loop/timer_slack.h" +#include "base/notreached.h" #include "base/run_loop.h" #include "base/stl_util.h" #include "base/time/time.h" diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc index c50e34f6da1..83cbec19c29 100644 --- a/chromium/base/message_loop/message_pump_win.cc +++ b/chromium/base/message_loop/message_pump_win.cc @@ -10,6 +10,7 @@ #include "base/bind.h" #include "base/debug/alias.h" +#include "base/feature_list.h" #include "base/metrics/histogram_macros.h" #include "base/numerics/ranges.h" #include "base/numerics/safe_conversions.h" @@ -19,6 +20,16 @@ namespace base { namespace { +// Jank analysis uncovered that Windows uses native ::PeekMessage calls as an +// opportunity to yield to other threads according to some heuristics (e.g. +// presumably when there's no input but perhaps a single WM_USER message posted +// later than another thread was readied). MessagePumpForUI doesn't intend to +// give this opportunity to the kernel when invoking ::PeekMessage however as it +// runs most tasks out-of-band. Hence, PM_NOYIELD should be used to tell +// ::PeekMessage it's not the only source of work for this thread. +const Feature kNoYieldFromNativePeek{"NoYieldFromNativePeek", + FEATURE_DISABLED_BY_DEFAULT}; + enum MessageLoopProblems { MESSAGE_POST_ERROR, COMPLETION_POST_ERROR, @@ -138,6 +149,7 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) { // from it. This is the only case where we must install/adjust the native // timer from ScheduleDelayedWork() because if we don't, the native loop will // go back to sleep, unaware of the new |delayed_work_time|. + // See MessageLoopTest.PostDelayedTaskFromSystemPump for an example. // TODO(gab): This could potentially be replaced by a ForegroundIdleProc hook // if Windows ends up being the only platform requiring ScheduleDelayedWork(). 
if (in_native_loop_ && !work_scheduled_) { @@ -206,8 +218,6 @@ void MessagePumpForUI::DoRunLoop() { // work. in_native_loop_ = false; - state_->delegate->BeforeDoInternalWork(); - DCHECK(!in_native_loop_); bool more_work_is_plausible = ProcessNextWindowsMessage(); in_native_loop_ = false; @@ -277,12 +287,23 @@ void MessagePumpForUI::WaitForWork(Delegate::NextWorkInfo next_work_info) { // some time to process its input messages by looping back to // MsgWaitForMultipleObjectsEx above when there are no messages for the // current thread. - MSG msg = {0}; - bool has_pending_sent_message = - (HIWORD(::GetQueueStatus(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0; - if (has_pending_sent_message || - ::PeekMessage(&msg, nullptr, 0, 0, PM_NOREMOVE)) { - return; + + { + // Trace as in ProcessNextWindowsMessage(). + TRACE_EVENT0("base", "MessagePumpForUI::WaitForWork GetQueueStatus"); + if (HIWORD(::GetQueueStatus(QS_SENDMESSAGE)) & QS_SENDMESSAGE) + return; + } + + { + static const auto kAdditionalFlags = + FeatureList::IsEnabled(kNoYieldFromNativePeek) ? PM_NOYIELD : 0x0; + + MSG msg; + // Trace as in ProcessNextWindowsMessage(). + TRACE_EVENT0("base", "MessagePumpForUI::WaitForWork PeekMessage"); + if (::PeekMessage(&msg, nullptr, 0, 0, kAdditionalFlags | PM_NOREMOVE)) + return; } // We know there are no more messages for this thread because PeekMessage @@ -435,16 +456,46 @@ bool MessagePumpForUI::ProcessNextWindowsMessage() { // dispatches the message and returns false. We return true in this // case to ensure that the message loop peeks again instead of calling // MsgWaitForMultipleObjectsEx. - bool sent_messages_in_queue = false; - DWORD queue_status = ::GetQueueStatus(QS_SENDMESSAGE); - if (HIWORD(queue_status) & QS_SENDMESSAGE) - sent_messages_in_queue = true; + bool more_work_is_plausible = false; + { + // Individually trace ::GetQueueStatus and ::PeekMessage because sampling + // profiler is hinting that we're spending a surprising amount of time with + // these on top of the stack. Tracing will be able to tell us whether this + // is a bias of sampling profiler (e.g. kernel takes ::GetQueueStatus as an + // opportunity to swap threads and is more likely to schedule the sampling + // profiler's thread while the sampled thread is swapped out on this frame). + TRACE_EVENT0("base", + "MessagePumpForUI::ProcessNextWindowsMessage GetQueueStatus"); + DWORD queue_status = ::GetQueueStatus(QS_SENDMESSAGE); + if (HIWORD(queue_status) & QS_SENDMESSAGE) + more_work_is_plausible = true; + } MSG msg; - if (::PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) - return ProcessMessageHelper(msg); + bool has_msg = false; + { + // ::PeekMessage() may process sent messages (regardless of |had_messages| + // as ::GetQueueStatus() is an optimistic check that may racily have missed + // an incoming event -- it doesn't hurt to have empty internal units of work + // when ::PeekMessage turns out to be a no-op). + state_->delegate->BeforeDoInternalWork(); + + static const auto kAdditionalFlags = + FeatureList::IsEnabled(kNoYieldFromNativePeek) ? PM_NOYIELD : 0x0; + + // PeekMessage can run a message if there are sent messages, trace that and + // emit the boolean param to see if it ever janks independently (ref. + // comment on GetQueueStatus). 
+ TRACE_EVENT1("base", + "MessagePumpForUI::ProcessNextWindowsMessage PeekMessage", + "sent_messages_in_queue", more_work_is_plausible); + has_msg = ::PeekMessage(&msg, nullptr, 0, 0, + kAdditionalFlags | PM_REMOVE) != FALSE; + } + if (has_msg) + more_work_is_plausible |= ProcessMessageHelper(msg); - return sent_messages_in_queue; + return more_work_is_plausible; } bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) { @@ -470,6 +521,8 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) { if (msg.message == kMsgHaveWork && msg.hwnd == message_window_.hwnd()) return ProcessPumpReplacementMessage(); + state_->delegate->BeforeDoInternalWork(); + for (Observer& observer : observers_) observer.WillDispatchMSG(msg); ::TranslateMessage(&msg); @@ -492,6 +545,10 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() { // that peeked replacement. Note that the re-post of kMsgHaveWork may be // asynchronous to this thread!! + // As in ProcessNextWindowsMessage() since ::PeekMessage() may process + // sent-messages. + state_->delegate->BeforeDoInternalWork(); + MSG msg; const bool have_message = ::PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE; @@ -544,6 +601,11 @@ MessagePumpForIO::IOContext::IOContext() { memset(&overlapped, 0, sizeof(overlapped)); } +MessagePumpForIO::IOHandler::IOHandler(const Location& from_here) + : io_handler_location_(from_here) {} + +MessagePumpForIO::IOHandler::~IOHandler() = default; + MessagePumpForIO::MessagePumpForIO() { port_.Set(::CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr, reinterpret_cast<ULONG_PTR>(nullptr), 1)); @@ -678,6 +740,9 @@ bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) { // Save this item for later completed_io_.push_back(item); } else { + TRACE_EVENT2("base,toplevel", "IOHandler::OnIOCompleted", "dest_file", + item.handler->io_handler_location().file_name(), "dest_func", + item.handler->io_handler_location().function_name()); item.handler->OnIOCompleted(item.context, item.bytes_transfered, item.error); } diff --git a/chromium/base/message_loop/message_pump_win.h b/chromium/base/message_loop/message_pump_win.h index a55947fd731..786ae8054f3 100644 --- a/chromium/base/message_loop/message_pump_win.h +++ b/chromium/base/message_loop/message_pump_win.h @@ -12,6 +12,7 @@ #include <memory> #include "base/base_export.h" +#include "base/location.h" #include "base/message_loop/message_pump.h" #include "base/observer_list.h" #include "base/optional.h" @@ -198,7 +199,7 @@ class BASE_EXPORT MessagePumpForIO : public MessagePumpWin { // // Typical use #1: // class MyFile : public IOHandler { - // MyFile() { + // MyFile() : IOHandler(FROM_HERE) { // ... // message_pump->RegisterIOHandler(file_, this); // } @@ -228,9 +229,14 @@ class BASE_EXPORT MessagePumpForIO : public MessagePumpWin { // message_pump->WaitForIOCompletion(INFINITE, this); // } // - class IOHandler { + class BASE_EXPORT IOHandler { public: - virtual ~IOHandler() {} + explicit IOHandler(const Location& from_here); + virtual ~IOHandler(); + + IOHandler(const IOHandler&) = delete; + IOHandler& operator=(const IOHandler&) = delete; + // This will be called once the pending IO operation associated with // |context| completes. |error| is the Win32 error code of the IO operation // (ERROR_SUCCESS if there was no error). 
|bytes_transfered| will be zero @@ -238,6 +244,11 @@ class BASE_EXPORT MessagePumpForIO : public MessagePumpWin { virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered, DWORD error) = 0; + + const Location& io_handler_location() { return io_handler_location_; } + + private: + const Location io_handler_location_; }; MessagePumpForIO(); diff --git a/chromium/base/metrics/OWNERS b/chromium/base/metrics/OWNERS index f70e5d4c3ab..7134ec98bcf 100644 --- a/chromium/base/metrics/OWNERS +++ b/chromium/base/metrics/OWNERS @@ -1,3 +1,9 @@ +# Note: Unless you want a specific reviewer's expertise, please send CLs to +# chromium-metrics-reviews@google.com rather than to specific individuals. These +# CLs will be automatically reassigned to a reviewer within about 5 minutes. +# This approach helps our team to load-balance incoming reviews. Googlers can +# read more about this at go/gwsq-gerrit. + asvitkine@chromium.org bcwhite@chromium.org holte@chromium.org diff --git a/chromium/base/metrics/bucket_ranges.cc b/chromium/base/metrics/bucket_ranges.cc index a3473bbf789..073f51707d1 100644 --- a/chromium/base/metrics/bucket_ranges.cc +++ b/chromium/base/metrics/bucket_ranges.cc @@ -6,7 +6,6 @@ #include <cmath> -#include "base/logging.h" #include "base/metrics/crc32.h" namespace base { diff --git a/chromium/base/metrics/dummy_histogram.cc b/chromium/base/metrics/dummy_histogram.cc index 2707733b2d4..ca7c4d9d30d 100644 --- a/chromium/base/metrics/dummy_histogram.cc +++ b/chromium/base/metrics/dummy_histogram.cc @@ -6,9 +6,10 @@ #include <memory> -#include "base/logging.h" #include "base/metrics/histogram_samples.h" #include "base/metrics/metrics_hashes.h" +#include "base/notreached.h" +#include "base/values.h" namespace base { @@ -99,4 +100,11 @@ std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotFinalDelta() const { return std::make_unique<DummyHistogramSamples>(); } +base::DictionaryValue DummyHistogram::ToGraphDict() const { + base::DictionaryValue dict; + dict.SetString("header", "dummy"); + dict.SetString("body", "dummy"); + return dict; +} + } // namespace base diff --git a/chromium/base/metrics/dummy_histogram.h b/chromium/base/metrics/dummy_histogram.h index e2cb64ecbbb..6c17cc8924b 100644 --- a/chromium/base/metrics/dummy_histogram.h +++ b/chromium/base/metrics/dummy_histogram.h @@ -13,6 +13,7 @@ #include "base/base_export.h" #include "base/metrics/histogram_base.h" #include "base/no_destructor.h" +#include "base/values.h" namespace base { @@ -36,8 +37,8 @@ class BASE_EXPORT DummyHistogram : public HistogramBase { std::unique_ptr<HistogramSamples> SnapshotSamples() const override; std::unique_ptr<HistogramSamples> SnapshotDelta() override; std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override; - void WriteHTMLGraph(std::string* output) const override {} void WriteAscii(std::string* output) const override {} + base::DictionaryValue ToGraphDict() const override; protected: // HistogramBase: diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc index 91e59060e6b..12782db9a9d 100644 --- a/chromium/base/metrics/histogram.cc +++ b/chromium/base/metrics/histogram.cc @@ -31,12 +31,12 @@ #include "base/pickle.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" +#include "base/strings/utf_string_conversions.h" #include "base/synchronization/lock.h" #include "base/values.h" #include "build/build_config.h" namespace { -constexpr char kHtmlNewLine[] = "<br>"; constexpr char kAsciiNewLine[] = "\n"; } // namespace @@ 
-579,21 +579,6 @@ bool Histogram::AddSamplesFromPickle(PickleIterator* iter) { return unlogged_samples_->AddFromPickle(iter); } -// The following methods provide a graphical histogram display. -void Histogram::WriteHTMLGraph(std::string* output) const { - // TBD(jar) Write a nice HTML bar chart, with divs an mouse-overs etc. - - // Get local (stack) copies of all effectively volatile class data so that we - // are consistent across our output activities. - std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples(); - output->append("<PRE>"); - output->append("<h4>"); - WriteAsciiHeader(*snapshot, output); - output->append("</h4>"); - WriteAsciiBody(*snapshot, true, kHtmlNewLine, output); - output->append("</PRE>"); -} - void Histogram::WriteAscii(std::string* output) const { // Get local (stack) copies of all effectively volatile class data so that we // are consistent across our output activities. @@ -603,6 +588,20 @@ void Histogram::WriteAscii(std::string* output) const { WriteAsciiBody(*snapshot, true, kAsciiNewLine, output); } +base::DictionaryValue Histogram::ToGraphDict() const { + std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples(); + std::string header; + std::string body; + base::DictionaryValue dict; + + WriteAsciiHeader(*snapshot, &header); + WriteAsciiBody(*snapshot, true, kAsciiNewLine, &body); + dict.SetString("header", header); + dict.SetString("body", body); + + return dict; +} + void Histogram::ValidateHistogramContents() const { CHECK(unlogged_samples_); CHECK(unlogged_samples_->bucket_ranges()); diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h index 91597cd0f10..ba300c70917 100644 --- a/chromium/base/metrics/histogram.h +++ b/chromium/base/metrics/histogram.h @@ -83,6 +83,7 @@ #include "base/metrics/histogram_samples.h" #include "base/strings/string_piece.h" #include "base/time/time.h" +#include "base/values.h" namespace base { @@ -217,8 +218,8 @@ class BASE_EXPORT Histogram : public HistogramBase { std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override; void AddSamples(const HistogramSamples& samples) override; bool AddSamplesFromPickle(base::PickleIterator* iter) override; - void WriteHTMLGraph(std::string* output) const override; void WriteAscii(std::string* output) const override; + base::DictionaryValue ToGraphDict() const override; // Validates the histogram contents and CHECKs on errors. // TODO(bcwhite): Remove this after https://crbug/836875. 
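A sketch of how a caller might consume the new ToGraphDict() output in place of the removed WriteHTMLGraph(); DumpHistogram is an illustrative helper name, not an API added by this patch:

    #include <string>

    #include "base/logging.h"
    #include "base/metrics/histogram_base.h"
    #include "base/values.h"

    void DumpHistogram(const base::HistogramBase& histogram) {
      base::DictionaryValue graph = histogram.ToGraphDict();
      const std::string* header = graph.FindStringKey("header");
      const std::string* body = graph.FindStringKey("body");
      if (header && body)
        LOG(INFO) << *header << "\n" << *body;
    }

The same two keys are what the updated unit tests below read back with FindStringKey().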
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc index 9904fd1adb4..8d55e92e1dd 100644 --- a/chromium/base/metrics/histogram_base.cc +++ b/chromium/base/metrics/histogram_base.cc @@ -10,14 +10,15 @@ #include <set> #include <utility> +#include "base/check_op.h" #include "base/json/json_string_value_serializer.h" -#include "base/logging.h" #include "base/metrics/histogram.h" #include "base/metrics/histogram_macros.h" #include "base/metrics/histogram_samples.h" #include "base/metrics/sparse_histogram.h" #include "base/metrics/statistics_recorder.h" #include "base/no_destructor.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/pickle.h" #include "base/process/process_handle.h" diff --git a/chromium/base/metrics/histogram_base.h b/chromium/base/metrics/histogram_base.h index f128ff2d9dc..de3a2cc6427 100644 --- a/chromium/base/metrics/histogram_base.h +++ b/chromium/base/metrics/histogram_base.h @@ -18,6 +18,7 @@ #include "base/macros.h" #include "base/strings/string_piece.h" #include "base/time/time.h" +#include "base/values.h" namespace base { @@ -232,10 +233,14 @@ class BASE_EXPORT HistogramBase { // read-only memory. virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0; - // The following methods provide graphical histogram displays. - virtual void WriteHTMLGraph(std::string* output) const = 0; + // The following method provides graphical histogram displays. virtual void WriteAscii(std::string* output) const = 0; + // Returns histogram data as a Dict with the following format: + // {"header": "Name of the histogram with samples, mean, and/or flags", + // "body": "ASCII histogram representation"} + virtual base::DictionaryValue ToGraphDict() const = 0; + // TODO(bcwhite): Remove this after https://crbug/836875. 
virtual void ValidateHistogramContents() const; diff --git a/chromium/base/metrics/histogram_snapshot_manager.cc b/chromium/base/metrics/histogram_snapshot_manager.cc index c1b804ebde1..fa04b54235d 100644 --- a/chromium/base/metrics/histogram_snapshot_manager.cc +++ b/chromium/base/metrics/histogram_snapshot_manager.cc @@ -7,6 +7,7 @@ #include <memory> #include "base/debug/alias.h" +#include "base/logging.h" #include "base/metrics/histogram_flattener.h" #include "base/metrics/histogram_samples.h" #include "base/metrics/statistics_recorder.h" diff --git a/chromium/base/metrics/histogram_unittest.cc b/chromium/base/metrics/histogram_unittest.cc index a01d100d9ec..aef19340d4b 100644 --- a/chromium/base/metrics/histogram_unittest.cc +++ b/chromium/base/metrics/histogram_unittest.cc @@ -28,6 +28,7 @@ #include "base/strings/stringprintf.h" #include "base/test/gtest_util.h" #include "base/time/time.h" +#include "base/values.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" @@ -916,22 +917,24 @@ TEST_P(HistogramTest, WriteAscii) { EXPECT_THAT(output, testing::MatchesRegex(kOutputFormatRe)); } -TEST_P(HistogramTest, WriteHTMLGraph) { +TEST_P(HistogramTest, ToGraphDict) { HistogramBase* histogram = LinearHistogram::FactoryGet("HTMLOut", /*minimum=*/1, /*maximum=*/10, /*bucket_count=*/5, HistogramBase::kNoFlags); histogram->AddCount(/*sample=*/4, /*value=*/5); - std::string output; - histogram->WriteHTMLGraph(&output); + base::DictionaryValue output = histogram->ToGraphDict(); + std::string* header = output.FindStringKey("header"); + std::string* body = output.FindStringKey("body"); - const char kOutputFormatRe[] = - R"(<PRE><h4>Histogram: HTMLOut recorded 5 samples, mean = 4\.0.*<\/h4>)" - R"(0 \.\.\. <br>)" - R"(4 -+O \(5 = 100\.0%\) \{0\.0%\}<br>)" - R"(7 \.\.\. <br><\/PRE>)"; + const char kOutputHeaderFormatRe[] = + R"(Histogram: HTMLOut recorded 5 samples, mean = 4\.0.*)"; + const char kOutputBodyFormatRe[] = R"(0 \.\.\. \n)" + R"(4 -+O \(5 = 100\.0%\) \{0\.0%\}\n)" + R"(7 \.\.\. \n)"; - EXPECT_THAT(output, testing::MatchesRegex(kOutputFormatRe)); + EXPECT_THAT(*header, testing::MatchesRegex(kOutputHeaderFormatRe)); + EXPECT_THAT(*body, testing::MatchesRegex(kOutputBodyFormatRe)); } } // namespace base diff --git a/chromium/base/metrics/metrics_hashes.cc b/chromium/base/metrics/metrics_hashes.cc index ef7072a4a83..1f90b56a063 100644 --- a/chromium/base/metrics/metrics_hashes.cc +++ b/chromium/base/metrics/metrics_hashes.cc @@ -4,8 +4,10 @@ #include "base/metrics/metrics_hashes.h" +#include <string.h> + +#include "base/check_op.h" #include "base/hash/md5.h" -#include "base/logging.h" #include "base/sys_byteorder.h" namespace base { diff --git a/chromium/base/metrics/metrics_hashes_unittest.cc b/chromium/base/metrics/metrics_hashes_unittest.cc index ec3446f2d8a..cc6247e5c29 100644 --- a/chromium/base/metrics/metrics_hashes_unittest.cc +++ b/chromium/base/metrics/metrics_hashes_unittest.cc @@ -16,6 +16,7 @@ namespace base { // Make sure our ID hashes are the same as what we see on the server side. TEST(MetricsUtilTest, HashMetricName) { + // The cases must match those in //tools/metrics/ukm/codegen_test.py. 
static const struct { std::string input; std::string output; diff --git a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc index aee6ceb792f..0776ad4dacc 100644 --- a/chromium/base/metrics/persistent_histogram_allocator_unittest.cc +++ b/chromium/base/metrics/persistent_histogram_allocator_unittest.cc @@ -7,7 +7,6 @@ #include "base/files/file.h" #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" -#include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/metrics/bucket_ranges.h" #include "base/metrics/histogram_macros.h" diff --git a/chromium/base/metrics/persistent_sample_map.cc b/chromium/base/metrics/persistent_sample_map.cc index ba73128b7d5..06ee80e8a81 100644 --- a/chromium/base/metrics/persistent_sample_map.cc +++ b/chromium/base/metrics/persistent_sample_map.cc @@ -4,9 +4,10 @@ #include "base/metrics/persistent_sample_map.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/metrics/histogram_macros.h" #include "base/metrics/persistent_histogram_allocator.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/stl_util.h" diff --git a/chromium/base/metrics/sample_map.cc b/chromium/base/metrics/sample_map.cc index f9252386af8..ec7e6aa1235 100644 --- a/chromium/base/metrics/sample_map.cc +++ b/chromium/base/metrics/sample_map.cc @@ -4,7 +4,7 @@ #include "base/metrics/sample_map.h" -#include "base/logging.h" +#include "base/check.h" #include "base/numerics/safe_conversions.h" #include "base/stl_util.h" diff --git a/chromium/base/metrics/sample_vector.cc b/chromium/base/metrics/sample_vector.cc index cf8634e8367..a465fbd6c91 100644 --- a/chromium/base/metrics/sample_vector.cc +++ b/chromium/base/metrics/sample_vector.cc @@ -4,10 +4,11 @@ #include "base/metrics/sample_vector.h" +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/metrics/persistent_memory_allocator.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/synchronization/lock.h" #include "base/threading/platform_thread.h" diff --git a/chromium/base/metrics/sparse_histogram.cc b/chromium/base/metrics/sparse_histogram.cc index 29080916c3f..61635d0417f 100644 --- a/chromium/base/metrics/sparse_histogram.cc +++ b/chromium/base/metrics/sparse_histogram.cc @@ -6,6 +6,7 @@ #include <utility> +#include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/metrics/dummy_histogram.h" #include "base/metrics/metrics_hashes.h" @@ -15,10 +16,11 @@ #include "base/metrics/statistics_recorder.h" #include "base/pickle.h" #include "base/strings/stringprintf.h" +#include "base/strings/utf_string_conversions.h" #include "base/synchronization/lock.h" +#include "base/values.h" namespace { -constexpr char kHtmlNewLine[] = "<br>"; constexpr char kAsciiNewLine[] = "\n"; } // namespace @@ -168,25 +170,27 @@ bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) { return unlogged_samples_->AddFromPickle(iter); } -void SparseHistogram::WriteHTMLGraph(std::string* output) const { +void SparseHistogram::WriteAscii(std::string* output) const { // Get a local copy of the data so we are consistent. 
std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples(); - output->append("<PRE>"); - output->append("<h4>"); WriteAsciiHeader(*snapshot, output); - output->append("</h4>"); - WriteAsciiBody(*snapshot, true, kHtmlNewLine, output); - output->append("</PRE>"); + output->append(kAsciiNewLine); + WriteAsciiBody(*snapshot, true, kAsciiNewLine, output); } -void SparseHistogram::WriteAscii(std::string* output) const { - // Get a local copy of the data so we are consistent. +base::DictionaryValue SparseHistogram::ToGraphDict() const { std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples(); + std::string header; + std::string body; + base::DictionaryValue dict; - WriteAsciiHeader(*snapshot, output); - output->append(kAsciiNewLine); - WriteAsciiBody(*snapshot, true, kAsciiNewLine, output); + WriteAsciiHeader(*snapshot, &header); + WriteAsciiBody(*snapshot, true, kAsciiNewLine, &body); + dict.SetString("header", header); + dict.SetString("body", body); + + return dict; } void SparseHistogram::SerializeInfoImpl(Pickle* pickle) const { diff --git a/chromium/base/metrics/sparse_histogram.h b/chromium/base/metrics/sparse_histogram.h index 232c8f27207..ab1d9157063 100644 --- a/chromium/base/metrics/sparse_histogram.h +++ b/chromium/base/metrics/sparse_histogram.h @@ -17,6 +17,7 @@ #include "base/metrics/histogram_base.h" #include "base/metrics/histogram_samples.h" #include "base/synchronization/lock.h" +#include "base/values.h" namespace base { @@ -54,8 +55,8 @@ class BASE_EXPORT SparseHistogram : public HistogramBase { std::unique_ptr<HistogramSamples> SnapshotSamples() const override; std::unique_ptr<HistogramSamples> SnapshotDelta() override; std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override; - void WriteHTMLGraph(std::string* output) const override; void WriteAscii(std::string* output) const override; + base::DictionaryValue ToGraphDict() const override; protected: // HistogramBase implementation: diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc index 6489b810473..a92b68b4a6d 100644 --- a/chromium/base/metrics/sparse_histogram_unittest.cc +++ b/chromium/base/metrics/sparse_histogram_unittest.cc @@ -7,6 +7,7 @@ #include <memory> #include <string> +#include "base/logging.h" #include "base/metrics/histogram_base.h" #include "base/metrics/histogram_functions.h" #include "base/metrics/histogram_samples.h" @@ -18,6 +19,7 @@ #include "base/pickle.h" #include "base/stl_util.h" #include "base/strings/stringprintf.h" +#include "base/values.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" @@ -402,21 +404,23 @@ TEST_P(SparseHistogramTest, WriteAscii) { EXPECT_THAT(output, testing::MatchesRegex(kOutputFormatRe)); } -TEST_P(SparseHistogramTest, WriteHTMLGraph) { +TEST_P(SparseHistogramTest, ToGraphDict) { HistogramBase* histogram = SparseHistogram::FactoryGet("HTMLOut", HistogramBase::kNoFlags); histogram->AddCount(/*sample=*/4, /*value=*/5); histogram->AddCount(/*sample=*/10, /*value=*/15); - std::string output; - histogram->WriteHTMLGraph(&output); + base::DictionaryValue output = histogram->ToGraphDict(); + std::string* header = output.FindStringKey("header"); + std::string* body = output.FindStringKey("body"); - const char kOutputFormatRe[] = - R"(<PRE><h4>Histogram: HTMLOut recorded 20 samples.*<\/h4>)" - R"(4 -+O +\(5 = 25.0%\)<br>)" - R"(10 -+O +\(15 = 75.0%\)<br><\/PRE>)"; + const char kOutputHeaderFormatRe[] = + R"(Histogram: HTMLOut recorded 20 samples.*)"; 
+ const char kOutputBodyFormatRe[] = R"(4 -+O +\(5 = 25.0%\)\n)" + R"(10 -+O +\(15 = 75.0%\)\n)"; - EXPECT_THAT(output, testing::MatchesRegex(kOutputFormatRe)); + EXPECT_THAT(*header, testing::MatchesRegex(kOutputHeaderFormatRe)); + EXPECT_THAT(*body, testing::MatchesRegex(kOutputBodyFormatRe)); } } // namespace base diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc index aa6bb4aa949..58a442d7a90 100644 --- a/chromium/base/metrics/statistics_recorder.cc +++ b/chromium/base/metrics/statistics_recorder.cc @@ -142,16 +142,6 @@ const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges( } // static -void StatisticsRecorder::WriteHTMLGraph(const std::string& query, - std::string* output) { - for (const HistogramBase* const histogram : - Sort(WithName(GetHistograms(), query))) { - histogram->WriteHTMLGraph(output); - *output += "<br><hr><br>"; - } -} - -// static void StatisticsRecorder::WriteGraph(const std::string& query, std::string* output) { if (query.length()) diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h index 6b7e4625240..f7e671911ee 100644 --- a/chromium/base/metrics/statistics_recorder.h +++ b/chromium/base/metrics/statistics_recorder.h @@ -94,12 +94,11 @@ class BASE_EXPORT StatisticsRecorder { static const BucketRanges* RegisterOrDeleteDuplicateRanges( const BucketRanges* ranges); - // Methods for appending histogram data to a string. Only histograms which + // A method for appending histogram data to a string. Only histograms which // have |query| as a substring are written to |output| (an empty string will // process all registered histograms). // - // These methods are thread safe. - static void WriteHTMLGraph(const std::string& query, std::string* output); + // This method is thread safe. static void WriteGraph(const std::string& query, std::string* output); // Returns the histograms with |verbosity_level| as the serialization diff --git a/chromium/base/metrics/ukm_source_id.cc b/chromium/base/metrics/ukm_source_id.cc index ce8c886d068..3513b954a36 100644 --- a/chromium/base/metrics/ukm_source_id.cc +++ b/chromium/base/metrics/ukm_source_id.cc @@ -4,8 +4,10 @@ #include "base/metrics/ukm_source_id.h" +#include <cmath> + #include "base/atomic_sequence_num.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/rand_util.h" namespace base { @@ -13,8 +15,11 @@ namespace base { namespace { const int64_t kLowBitsMask = (INT64_C(1) << 32) - 1; -const int64_t kNumTypeBits = static_cast<int64_t>(UkmSourceId::Type::kMaxValue); -const int64_t kTypeMask = (INT64_C(1) << kNumTypeBits) - 1; + +int64_t GetNumTypeBits() { + return std::ceil( + std::log2(static_cast<int64_t>(UkmSourceId::Type::kMaxValue) + 1)); +} } // namespace @@ -35,15 +40,24 @@ UkmSourceId UkmSourceId::New() { // static UkmSourceId UkmSourceId::FromOtherId(int64_t other_id, UkmSourceId::Type type) { + // Note on syntax: std::ceil and std::log2 are not constexpr functions thus + // these variables cannot be initialized statically in the global scope above. + // Function static initialization here is thread safe; so they are initialized + // at most once. + static const int64_t kNumTypeBits = GetNumTypeBits(); + static const int64_t kTypeMask = (INT64_C(1) << kNumTypeBits) - 1; + const int64_t type_bits = static_cast<int64_t>(type); DCHECK_EQ(type_bits, type_bits & kTypeMask); - // Stores the the type ID in the low bits of the source id, and shift the rest - // of the ID to make room. 
This could cause the original ID to overflow, but + // Stores the type of the source ID in its lower bits, and shift the rest of + // the ID to make room. This could cause the original ID to overflow, but // that should be rare enough that it won't matter for UKM's purposes. return UkmSourceId((other_id << kNumTypeBits) | type_bits); } UkmSourceId::Type UkmSourceId::GetType() const { + static const int64_t kNumTypeBits = GetNumTypeBits(); + static const int64_t kTypeMask = (INT64_C(1) << kNumTypeBits) - 1; return static_cast<UkmSourceId::Type>(value_ & kTypeMask); } diff --git a/chromium/base/metrics/ukm_source_id.h b/chromium/base/metrics/ukm_source_id.h index 36722357176..3ba01a0d64c 100644 --- a/chromium/base/metrics/ukm_source_id.h +++ b/chromium/base/metrics/ukm_source_id.h @@ -18,7 +18,7 @@ class BASE_EXPORT UkmSourceId { public: enum class Type : int64_t { // Source ids of this type are created via ukm::AssignNewSourceId, to denote - // 'custom' source other than the 4 types below. Source of this type has + // 'custom' source other than the types below. Source of this type has // additional restrictions with logging, as determined by // IsWhitelistedSourceId. UKM = 0, @@ -44,7 +44,12 @@ class BASE_EXPORT UkmSourceId { // type and associated events are expected to be recorded within the same // report interval; it will not be kept in memory between different reports. PAYMENT_APP_ID = 5, - kMaxValue = PAYMENT_APP_ID, + // Source ID for desktop web apps, based on the start_url in the web app + // manifest. A new source of this type and associated events are expected to + // be recorded within the same report interval; it will not be kept in + // memory between different reports. + DESKTOP_WEB_APP_ID = 6, + kMaxValue = DESKTOP_WEB_APP_ID, }; // Default constructor has the invalid value. diff --git a/chromium/base/native_library_fuchsia.cc b/chromium/base/native_library_fuchsia.cc index 898fb44484b..d742defe33d 100644 --- a/chromium/base/native_library_fuchsia.cc +++ b/chromium/base/native_library_fuchsia.cc @@ -18,7 +18,7 @@ #include "base/files/file.h" #include "base/files/file_path.h" #include "base/fuchsia/fuchsia_logging.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/path_service.h" #include "base/posix/safe_strerror.h" #include "base/strings/stringprintf.h" diff --git a/chromium/base/native_library_ios.mm b/chromium/base/native_library_ios.mm index dbcafb41f1e..2c0d718217c 100644 --- a/chromium/base/native_library_ios.mm +++ b/chromium/base/native_library_ios.mm @@ -4,7 +4,8 @@ #include "base/native_library.h" -#include "base/logging.h" +#include "base/check.h" +#include "base/notreached.h" #include "base/strings/string_util.h" diff --git a/chromium/base/no_destructor_unittest.cc b/chromium/base/no_destructor_unittest.cc index 49d314e5a0d..63d5c13e470 100644 --- a/chromium/base/no_destructor_unittest.cc +++ b/chromium/base/no_destructor_unittest.cc @@ -10,7 +10,7 @@ #include "base/atomicops.h" #include "base/barrier_closure.h" #include "base/bind.h" -#include "base/logging.h" +#include "base/check.h" #include "base/system/sys_info.h" #include "base/threading/platform_thread.h" #include "base/threading/simple_thread.h" diff --git a/chromium/base/notreached.cc b/chromium/base/notreached.cc new file mode 100644 index 00000000000..ff54001db2a --- /dev/null +++ b/chromium/base/notreached.cc @@ -0,0 +1,22 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. 
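A worked example for the UkmSourceId type packing introduced in ukm_source_id.cc above, using only values visible in this diff (kMaxValue = DESKTOP_WEB_APP_ID = 6): GetNumTypeBits() evaluates ceil(log2(6 + 1)) = 3, so kTypeMask is 0b111 and the type occupies the three low bits.

    // Illustrative arithmetic, mirroring FromOtherId() and GetType():
    //   packed = (other_id << kNumTypeBits) | type_bits
    //          = (5 << 3) | 6   == 46    for other_id == 5, DESKTOP_WEB_APP_ID
    //   type   = packed & kTypeMask
    //          = 46 & 0b111     == 6     recovers DESKTOP_WEB_APP_ID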
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/notreached.h" + +// This is a widely included header and its size has significant impact on +// build time. Try not to raise this limit unless absolutely necessary. See +// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md +#ifndef NACL_TC_REV +#pragma clang max_tokens_here 17000 +#endif + +#include "base/logging.h" + +namespace logging { + +BASE_EXPORT void LogErrorNotReached(const char* file, int line) { + LogMessage(file, line, LOG_ERROR).stream() << "NOTREACHED() hit."; +} + +} // namespace logging diff --git a/chromium/base/notreached.h b/chromium/base/notreached.h new file mode 100644 index 00000000000..b6466b8a493 --- /dev/null +++ b/chromium/base/notreached.h @@ -0,0 +1,46 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_NOTREACHED_H_ +#define BASE_NOTREACHED_H_ + +#include "base/check.h" +#include "base/logging_buildflags.h" + +namespace logging { + +#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED) +void BASE_EXPORT LogErrorNotReached(const char* file, int line); +#define NOTREACHED() \ + true ? ::logging::LogErrorNotReached(__FILE__, __LINE__) \ + : EAT_CHECK_STREAM_PARAMS() +#else +#define NOTREACHED() DCHECK(false) +#endif + +// The NOTIMPLEMENTED() macro annotates codepaths which have not been +// implemented yet. If output spam is a serious concern, +// NOTIMPLEMENTED_LOG_ONCE can be used. +#if DCHECK_IS_ON() +#define NOTIMPLEMENTED() \ + ::logging::CheckError::NotImplemented(__FILE__, __LINE__, \ + __PRETTY_FUNCTION__) \ + .stream() +#else +#define NOTIMPLEMENTED() EAT_CHECK_STREAM_PARAMS() +#endif + +#define NOTIMPLEMENTED_LOG_ONCE() \ + { \ + static bool logged_once = false; \ + if (!logged_once) { \ + NOTIMPLEMENTED(); \ + logged_once = true; \ + } \ + } \ + EAT_CHECK_STREAM_PARAMS() + +} // namespace logging + +#endif // BASE_NOTREACHED_H_ diff --git a/chromium/base/observer_list_perftest.cc b/chromium/base/observer_list_perftest.cc index 08f0af93d14..ae70a84e5c3 100644 --- a/chromium/base/observer_list_perftest.cc +++ b/chromium/base/observer_list_perftest.cc @@ -6,7 +6,7 @@ #include <memory> -#include "base/logging.h" +#include "base/check_op.h" #include "base/observer_list.h" #include "base/strings/stringprintf.h" #include "base/time/time.h" diff --git a/chromium/base/one_shot_event.cc b/chromium/base/one_shot_event.cc index f96f7a13894..e813f74abc1 100644 --- a/chromium/base/one_shot_event.cc +++ b/chromium/base/one_shot_event.cc @@ -5,12 +5,12 @@ #include "base/one_shot_event.h" #include <stddef.h> +#include <utility> #include "base/callback.h" #include "base/location.h" #include "base/single_thread_task_runner.h" #include "base/task_runner.h" -#include "base/threading/thread_task_runner_handle.h" #include "base/time/time.h" namespace base { @@ -18,24 +18,22 @@ namespace base { struct OneShotEvent::TaskInfo { TaskInfo() {} TaskInfo(const Location& from_here, - const scoped_refptr<SingleThreadTaskRunner>& runner, + scoped_refptr<SingleThreadTaskRunner> runner, OnceClosure task, const TimeDelta& delay) : from_here(from_here), - runner(runner), + runner(std::move(runner)), task(std::move(task)), delay(delay) { - CHECK(runner.get()); // Detect mistakes with a decent stack frame. + CHECK(this->runner.get()); // Detect mistakes with a decent stack frame. 
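    // Note: the |runner| parameter was moved into the |runner| member in the
    // initializer list above, leaving the parameter null, so the CHECK on the
    // previous line has to read the member through |this->runner|.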
} TaskInfo(TaskInfo&&) = default; + TaskInfo& operator=(TaskInfo&&) = default; Location from_here; scoped_refptr<SingleThreadTaskRunner> runner; OnceClosure task; TimeDelta delay; - - private: - DISALLOW_COPY_AND_ASSIGN(TaskInfo); }; OneShotEvent::OneShotEvent() : signaled_(false) { @@ -48,16 +46,10 @@ OneShotEvent::OneShotEvent(bool signaled) : signaled_(signaled) { } OneShotEvent::~OneShotEvent() {} -void OneShotEvent::Post(const Location& from_here, OnceClosure task) const { - PostImpl(from_here, std::move(task), ThreadTaskRunnerHandle::Get(), - TimeDelta()); -} - -void OneShotEvent::Post( - const Location& from_here, - OnceClosure task, - const scoped_refptr<SingleThreadTaskRunner>& runner) const { - PostImpl(from_here, std::move(task), runner, TimeDelta()); +void OneShotEvent::Post(const Location& from_here, + OnceClosure task, + scoped_refptr<SingleThreadTaskRunner> runner) const { + PostImpl(from_here, std::move(task), std::move(runner), TimeDelta()); } void OneShotEvent::PostDelayed(const Location& from_here, @@ -94,7 +86,7 @@ void OneShotEvent::Signal() { void OneShotEvent::PostImpl(const Location& from_here, OnceClosure task, - const scoped_refptr<SingleThreadTaskRunner>& runner, + scoped_refptr<SingleThreadTaskRunner> runner, const TimeDelta& delay) const { DCHECK(thread_checker_.CalledOnValidThread()); @@ -104,7 +96,7 @@ void OneShotEvent::PostImpl(const Location& from_here, else runner->PostDelayedTask(from_here, std::move(task), delay); } else { - tasks_.emplace_back(from_here, runner, std::move(task), delay); + tasks_.emplace_back(from_here, std::move(runner), std::move(task), delay); } } diff --git a/chromium/base/one_shot_event.h b/chromium/base/one_shot_event.h index b917de299fc..26f84aa8831 100644 --- a/chromium/base/one_shot_event.h +++ b/chromium/base/one_shot_event.h @@ -12,6 +12,7 @@ #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/threading/thread_checker.h" +#include "base/threading/thread_task_runner_handle.h" namespace base { @@ -46,8 +47,10 @@ class BASE_EXPORT OneShotEvent { return signaled_; } - // Causes is_signaled() to return true and all queued tasks to be - // run in an arbitrary order. This method must only be called once. + // Causes is_signaled() to return true and all tasks to be posted to their + // corresponding task runners in the FIFO order. Note that tasks posted to + // different SingleThreadTaskRunners may still execute in arbitrary order. + // This method must only be called once. void Signal(); // Scheduled |task| to be called on |runner| after is_signaled() @@ -60,21 +63,16 @@ class BASE_EXPORT OneShotEvent { // If |*this| is destroyed before being released, none of these // tasks will be executed. // - // Omitting the |runner| argument indicates that |task| should run - // on current thread's TaskRunner. - // - // Tasks may be run in an arbitrary order, not just FIFO. Tasks - // will never be called on the current thread before this function - // returns. Beware that there's no simple way to wait for all tasks - // on a OneShotEvent to complete, so it's almost never safe to use - // base::Unretained() when creating one. - // - // Const because Post() doesn't modify the logical state of this - // object (which is just the is_signaled() bit). - void Post(const Location& from_here, OnceClosure task) const; + // Tasks are posted in FIFO order, however, tasks posted to different + // SingleThreadTaskRunners may still execute in an arbitrary order. 
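A hypothetical caller sketch for the defaulted |runner| parameter declared just below (OnReady and background_runner are illustrative): omitting the runner now evaluates ThreadTaskRunnerHandle::Get() at the call site, replacing the separate two-argument overload removed from the .cc file above.

    base::OneShotEvent ready_event;
    ready_event.Post(FROM_HERE, base::BindOnce(&OnReady));                     // current thread's runner
    ready_event.Post(FROM_HERE, base::BindOnce(&OnReady), background_runner);  // explicit runner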
Tasks will + // never be called on the current thread before this function returns. Beware + // that there's no simple way to wait for all tasks on a OneShotEvent to + // complete, so it's almost never safe to use base::Unretained() when creating + // one. void Post(const Location& from_here, OnceClosure task, - const scoped_refptr<SingleThreadTaskRunner>& runner) const; + scoped_refptr<SingleThreadTaskRunner> runner = + ThreadTaskRunnerHandle::Get()) const; void PostDelayed(const Location& from_here, OnceClosure task, const TimeDelta& delay) const; @@ -84,7 +82,7 @@ class BASE_EXPORT OneShotEvent { void PostImpl(const Location& from_here, OnceClosure task, - const scoped_refptr<SingleThreadTaskRunner>& runner, + scoped_refptr<SingleThreadTaskRunner> runner, const TimeDelta& delay) const; ThreadChecker thread_checker_; diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc index dbf1ce114a2..bc6e186ca31 100644 --- a/chromium/base/optional_unittest.cc +++ b/chromium/base/optional_unittest.cc @@ -9,6 +9,7 @@ #include <string> #include <vector> +#include "base/macros.h" #include "base/test/gtest_util.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/path_service.cc b/chromium/base/path_service.cc index cc29cab2f1f..8b05f150b36 100644 --- a/chromium/base/path_service.cc +++ b/chromium/base/path_service.cc @@ -12,9 +12,9 @@ #include <shlobj.h> #endif +#include "base/check_op.h" #include "base/files/file_path.h" #include "base/files/file_util.h" -#include "base/logging.h" #include "base/synchronization/lock.h" #include "build/build_config.h" diff --git a/chromium/base/path_service_unittest.cc b/chromium/base/path_service_unittest.cc index 05c676ea16f..e621eba3a10 100644 --- a/chromium/base/path_service_unittest.cc +++ b/chromium/base/path_service_unittest.cc @@ -7,6 +7,7 @@ #include "base/files/file_path.h" #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" +#include "base/logging.h" #include "base/strings/string_util.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest-spi.h" @@ -201,12 +202,12 @@ TEST_F(PathServiceTest, OverrideMultiple) { FilePath fake_cache_dir1(temp_dir.GetPath().AppendASCII("1")); EXPECT_TRUE(PathService::Override(my_special_key, fake_cache_dir1)); EXPECT_TRUE(PathExists(fake_cache_dir1)); - ASSERT_EQ(1, WriteFile(fake_cache_dir1.AppendASCII("t1"), ".", 1)); + ASSERT_TRUE(WriteFile(fake_cache_dir1.AppendASCII("t1"), ".")); FilePath fake_cache_dir2(temp_dir.GetPath().AppendASCII("2")); EXPECT_TRUE(PathService::Override(my_special_key + 1, fake_cache_dir2)); EXPECT_TRUE(PathExists(fake_cache_dir2)); - ASSERT_EQ(1, WriteFile(fake_cache_dir2.AppendASCII("t2"), ".", 1)); + ASSERT_TRUE(WriteFile(fake_cache_dir2.AppendASCII("t2"), ".")); FilePath result; EXPECT_TRUE(PathService::Get(my_special_key, &result)); diff --git a/chromium/base/power_monitor/power_monitor.cc b/chromium/base/power_monitor/power_monitor.cc index 2d3dd073aef..0a48f30f521 100644 --- a/chromium/base/power_monitor/power_monitor.cc +++ b/chromium/base/power_monitor/power_monitor.cc @@ -4,6 +4,7 @@ #include "base/power_monitor/power_monitor.h" +#include <atomic> #include <utility> #include "base/power_monitor/power_monitor_source.h" @@ -11,6 +12,10 @@ namespace base { +namespace { +std::atomic_bool g_is_process_suspended{false}; +} + void PowerMonitor::Initialize(std::unique_ptr<PowerMonitorSource> source) { DCHECK(!IsInitialized()); GetInstance()->source_ = 
std::move(source); @@ -44,6 +49,11 @@ bool PowerMonitor::IsOnBatteryPower() { void PowerMonitor::ShutdownForTesting() { PowerMonitor::GetInstance()->observers_->AssertEmpty(); GetInstance()->source_ = nullptr; + g_is_process_suspended.store(false); +} + +bool PowerMonitor::IsProcessSuspended() { + return g_is_process_suspended.load(std::memory_order_relaxed); } void PowerMonitor::NotifyPowerStateChange(bool battery_in_use) { @@ -57,16 +67,18 @@ void PowerMonitor::NotifyPowerStateChange(bool battery_in_use) { void PowerMonitor::NotifySuspend() { DCHECK(IsInitialized()); TRACE_EVENT_INSTANT0("base", "PowerMonitor::NotifySuspend", - TRACE_EVENT_SCOPE_GLOBAL); + TRACE_EVENT_SCOPE_PROCESS); DVLOG(1) << "Power Suspending"; + g_is_process_suspended.store(true, std::memory_order_relaxed); GetInstance()->observers_->Notify(FROM_HERE, &PowerObserver::OnSuspend); } void PowerMonitor::NotifyResume() { DCHECK(IsInitialized()); TRACE_EVENT_INSTANT0("base", "PowerMonitor::NotifyResume", - TRACE_EVENT_SCOPE_GLOBAL); + TRACE_EVENT_SCOPE_PROCESS); DVLOG(1) << "Power Resuming"; + g_is_process_suspended.store(false, std::memory_order_relaxed); GetInstance()->observers_->Notify(FROM_HERE, &PowerObserver::OnResume); } diff --git a/chromium/base/power_monitor/power_monitor.h b/chromium/base/power_monitor/power_monitor.h index 1a2f01581ee..fcf5ee482a5 100644 --- a/chromium/base/power_monitor/power_monitor.h +++ b/chromium/base/power_monitor/power_monitor.h @@ -52,6 +52,12 @@ class BASE_EXPORT PowerMonitor { // PowerMonitor has been initialized. static bool IsOnBatteryPower(); + // Is the computer currently in suspend mode. Safe to call on any thread. Safe + // to call even if the PowerMonitor hasn't been initialized. When called + // before initialisation, the process is assumed to not be suspended no matter + // what is the real power state. + static bool IsProcessSuspended(); + // Uninitializes the PowerMonitor. Should be called at the end of any unit // test that mocks out the PowerMonitor, to avoid affecting subsequent tests. // There must be no live PowerObservers when invoked. Safe to call even if the diff --git a/chromium/base/power_monitor/power_monitor_device_source_stub.cc b/chromium/base/power_monitor/power_monitor_device_source_stub.cc index f24e5b23f0a..29ad8d939b4 100644 --- a/chromium/base/power_monitor/power_monitor_device_source_stub.cc +++ b/chromium/base/power_monitor/power_monitor_device_source_stub.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "base/notreached.h" #include "base/power_monitor/power_monitor_device_source.h" namespace base { diff --git a/chromium/base/process/OWNERS b/chromium/base/process/OWNERS new file mode 100644 index 00000000000..5882a768f4e --- /dev/null +++ b/chromium/base/process/OWNERS @@ -0,0 +1,3 @@ +# LaunchProcess() is part of the Fuchsia sandbox. 
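A hypothetical caller sketch for the PowerMonitor::IsProcessSuspended() query added above (the watchdog function is illustrative, not part of base): periodic work can skip a cycle while the process is suspended instead of charging the suspend gap to whatever it is monitoring.

    void MaybeRunWatchdogCheck() {
      // Delayed tasks can fire immediately after resume; bail out so time spent
      // suspended is not misattributed to the operation being watched.
      if (base::PowerMonitor::IsProcessSuspended())
        return;
      RunWatchdogCheck();  // illustrative
    }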
+per-file launch_fuchsia.*=set noparent +per-file launch_fuchsia.*=file://fuchsia/SECURITY_OWNERS diff --git a/chromium/base/process/environment_internal.cc b/chromium/base/process/environment_internal.cc index 357140fa6fd..e8ac997c339 100644 --- a/chromium/base/process/environment_internal.cc +++ b/chromium/base/process/environment_internal.cc @@ -6,6 +6,10 @@ #include <stddef.h> +#if defined(OS_POSIX) || defined(OS_FUCHSIA) +#include <string.h> +#endif + #include <vector> namespace base { diff --git a/chromium/base/process/launch_fuchsia.cc b/chromium/base/process/launch_fuchsia.cc index dea0fcea1be..42304efd0d6 100644 --- a/chromium/base/process/launch_fuchsia.cc +++ b/chromium/base/process/launch_fuchsia.cc @@ -151,7 +151,7 @@ Process LaunchProcess(const std::vector<std::string>& argv, // |clear_environment|, |environment| or |current_directory| are set then we // construct a new (possibly empty) environment, otherwise we let fdio_spawn() // clone the caller's environment into the new process. - uint32_t spawn_flags = FDIO_SPAWN_CLONE_LDSVC | options.spawn_flags; + uint32_t spawn_flags = FDIO_SPAWN_DEFAULT_LDSVC | options.spawn_flags; EnvironmentMap environ_modifications = options.environment; if (!options.current_directory.empty()) { diff --git a/chromium/base/process/launch_mac.cc b/chromium/base/process/launch_mac.cc index 62ffd51c4dd..6fc6ba115fb 100644 --- a/chromium/base/process/launch_mac.cc +++ b/chromium/base/process/launch_mac.cc @@ -89,6 +89,10 @@ class PosixSpawnFileActions { DPSXCHECK(posix_spawn_file_actions_addinherit_np(&file_actions_, filedes)); } + void Chdir(const char* path) API_AVAILABLE(macos(10.15)) { + DPSXCHECK(posix_spawn_file_actions_addchdir_np(&file_actions_, path)); + } + const posix_spawn_file_actions_t* get() const { return &file_actions_; } private: @@ -254,14 +258,19 @@ Process LaunchProcess(const std::vector<std::string>& argv, ? options.real_path.value().c_str() : argv_cstr[0]; - // If the new program has specified its PWD, change the thread-specific - // working directory. The new process will inherit it during posix_spawnp(). if (!options.current_directory.empty()) { - int rv = - ChangeCurrentThreadDirectory(options.current_directory.value().c_str()); - if (rv != 0) { - DPLOG(ERROR) << "pthread_chdir_np"; - return Process(); + const char* chdir_str = options.current_directory.value().c_str(); + if (__builtin_available(macOS 10.15, *)) { + file_actions.Chdir(chdir_str); + } else { + // If the chdir posix_spawn_file_actions extension is not available, + // change the thread-specific working directory. The new process will + // inherit it during posix_spawnp(). + int rv = ChangeCurrentThreadDirectory(chdir_str); + if (rv != 0) { + DPLOG(ERROR) << "pthread_chdir_np"; + return Process(); + } } } @@ -271,10 +280,10 @@ Process LaunchProcess(const std::vector<std::string>& argv, // If |options.mach_ports_for_rendezvous| is specified : the server's lock // must be held for the duration of posix_spawnp() so that new child's PID // can be recorded with the set of ports. - const bool has_mac_ports_for_rendezvous = + const bool has_mach_ports_for_rendezvous = !options.mach_ports_for_rendezvous.empty(); AutoLockMaybe rendezvous_lock( - has_mac_ports_for_rendezvous + has_mach_ports_for_rendezvous ? 
&MachPortRendezvousServer::GetInstance()->GetLock() : nullptr); @@ -282,7 +291,7 @@ Process LaunchProcess(const std::vector<std::string>& argv, rv = posix_spawnp(&pid, executable_path, file_actions.get(), attr.get(), &argv_cstr[0], new_environ); - if (has_mac_ports_for_rendezvous) { + if (has_mach_ports_for_rendezvous) { auto* rendezvous = MachPortRendezvousServer::GetInstance(); if (rv == 0) { rendezvous->RegisterPortsForPid(pid, options.mach_ports_for_rendezvous); @@ -301,7 +310,12 @@ Process LaunchProcess(const std::vector<std::string>& argv, // Restore the thread's working directory if it was changed. if (!options.current_directory.empty()) { - ResetCurrentThreadDirectory(); + if (__builtin_available(macOS 10.15, *)) { + // Nothing to do because no global state was changed, but + // __builtin_available is special and cannot be negated. + } else { + ResetCurrentThreadDirectory(); + } } if (rv != 0) { diff --git a/chromium/base/process/memory.cc b/chromium/base/process/memory.cc index 4a09c8db734..21651b90be5 100644 --- a/chromium/base/process/memory.cc +++ b/chromium/base/process/memory.cc @@ -8,6 +8,8 @@ #include <windows.h> #endif // defined(OS_WIN) +#include <string.h> + #include "base/debug/alias.h" #include "base/logging.h" #include "base/partition_alloc_buildflags.h" diff --git a/chromium/base/process/process_fuchsia.cc b/chromium/base/process/process_fuchsia.cc index 516a067c1b2..d9e5bada228 100644 --- a/chromium/base/process/process_fuchsia.cc +++ b/chromium/base/process/process_fuchsia.cc @@ -20,6 +20,59 @@ namespace base { +namespace { + +zx::process FindProcessInJobTree(const zx::job& job, ProcessId pid) { + zx::process process; + zx_status_t status = job.get_child(pid, ZX_RIGHT_SAME_RIGHTS, &process); + + if (status == ZX_OK) + return process; + + if (status == ZX_ERR_NOT_FOUND) { + std::vector<zx_koid_t> job_koids(32); + while (true) { + // Fetch the KOIDs of the job children of |job|. + size_t actual = 0u; + size_t available = 0u; + status = job.get_info(ZX_INFO_JOB_CHILDREN, job_koids.data(), + job_koids.size() * sizeof(zx_koid_t), &actual, + &available); + + if (status != ZX_OK) { + ZX_DLOG(ERROR, status) << "zx_object_get_info(JOB_CHILDREN)"; + return zx::process(); + } + + // If |job_koids| was too small then resize it and try again. + if (available > actual) { + job_koids.resize(available); + continue; + } + + // Break out of the loop and iterate over |job_koids|, to find the PID. + job_koids.resize(actual); + break; + } + + for (zx_koid_t job_koid : job_koids) { + zx::job child_job; + if (job.get_child(job_koid, ZX_RIGHT_SAME_RIGHTS, &child_job) != ZX_OK) + continue; + process = FindProcessInJobTree(child_job, pid); + if (process) + return process; + } + + return zx::process(); + } + + ZX_DLOG(ERROR, status) << "zx_object_get_child"; + return zx::process(); +} + +} // namespace + Process::Process(ProcessHandle handle) : process_(handle), is_current_process_(false) { CHECK_NE(handle, zx_process_self()); @@ -54,16 +107,7 @@ Process Process::Open(ProcessId pid) { if (pid == GetCurrentProcId()) return Current(); - // While a process with object id |pid| might exist, the job returned by - // zx::job::default_job() might not contain it, so this call can fail. 
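An illustrative note on the effect of FindProcessInJobTree() above: Process::Open() can now locate a process whose pid belongs to a job nested anywhere under the default job, not only a direct child of it. A hypothetical caller is unchanged; only the lookup walks deeper.

    base::Process child = base::Process::Open(child_pid);  // child_pid from an earlier launch
    if (child.IsValid())
      child.Terminate(/*exit_code=*/1, /*wait=*/false);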
- zx::process process; - zx_status_t status = - GetDefaultJob()->get_child(pid, ZX_RIGHT_SAME_RIGHTS, &process); - if (status != ZX_OK) { - ZX_DLOG(ERROR, status) << "zx_object_get_child"; - return Process(); - } - return Process(process.release()); + return Process(FindProcessInJobTree(*GetDefaultJob(), pid).release()); } // static diff --git a/chromium/base/process/process_handle.cc b/chromium/base/process/process_handle.cc index 57635fff993..7b130b40b14 100644 --- a/chromium/base/process/process_handle.cc +++ b/chromium/base/process/process_handle.cc @@ -6,7 +6,7 @@ #include <stdint.h> -#include "base/logging.h" +#include "base/check.h" #include "build/build_config.h" namespace base { diff --git a/chromium/base/process/process_iterator_fuchsia.cc b/chromium/base/process/process_iterator_fuchsia.cc index 6d411bad72d..9ea72d924f0 100644 --- a/chromium/base/process/process_iterator_fuchsia.cc +++ b/chromium/base/process/process_iterator_fuchsia.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "base/notreached.h" #include "base/process/process_iterator.h" namespace base { diff --git a/chromium/base/process/process_linux.cc b/chromium/base/process/process_linux.cc index d97226ab0cf..b25df847833 100644 --- a/chromium/base/process/process_linux.cc +++ b/chromium/base/process/process_linux.cc @@ -7,8 +7,9 @@ #include <errno.h> #include <sys/resource.h> +#include "base/check.h" #include "base/files/file_util.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/posix/can_lower_nice_to.h" #include "base/process/internal_linux.h" #include "base/strings/string_number_conversions.h" diff --git a/chromium/base/process/process_metrics.cc b/chromium/base/process/process_metrics.cc index d53c08601c8..3a5aba3373e 100644 --- a/chromium/base/process/process_metrics.cc +++ b/chromium/base/process/process_metrics.cc @@ -6,7 +6,8 @@ #include <utility> -#include "base/logging.h" +#include "base/check.h" +#include "base/notreached.h" #include "base/values.h" #include "build/build_config.h" diff --git a/chromium/base/process/process_metrics_ios.cc b/chromium/base/process/process_metrics_ios.cc index 83fc3d69331..ae36b7d4fc6 100644 --- a/chromium/base/process/process_metrics_ios.cc +++ b/chromium/base/process/process_metrics_ios.cc @@ -6,11 +6,13 @@ #include <limits.h> #include <mach/task.h> +#include <malloc/malloc.h> #include <stddef.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/mac/scoped_mach_port.h" #include "base/memory/ptr_util.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" namespace base { @@ -97,4 +99,10 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) { return true; } +size_t ProcessMetrics::GetMallocUsage() { + malloc_statistics_t stats; + malloc_zone_statistics(nullptr, &stats); + return stats.size_in_use; +} + } // namespace base diff --git a/chromium/base/process/process_metrics_posix.cc b/chromium/base/process/process_metrics_posix.cc index 044bd8d244f..518652573b4 100644 --- a/chromium/base/process/process_metrics_posix.cc +++ b/chromium/base/process/process_metrics_posix.cc @@ -102,7 +102,16 @@ void IncreaseFdLimitTo(unsigned int max_descriptors) { #endif // !defined(OS_FUCHSIA) size_t GetPageSize() { - return getpagesize(); + static const size_t pagesize = []() -> size_t { + // For more information see getpagesize(2). Portable applications should use + // sysconf(_SC_PAGESIZE) rather than getpagesize() if it's available. 
+#if defined(_SC_PAGESIZE) + return sysconf(_SC_PAGESIZE); +#else + return getpagesize(); +#endif + }(); + return pagesize; } size_t ProcessMetrics::GetMallocUsage() { diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc index 539a6aa4660..808a7be09d0 100644 --- a/chromium/base/process/process_metrics_unittest.cc +++ b/chromium/base/process/process_metrics_unittest.cc @@ -694,7 +694,7 @@ TEST(ProcessMetricsTest, GetDiskUsageBytesPerSecond) { // Write a megabyte on disk. const int kMegabyte = 1024 * 1014; std::string data(kMegabyte, 'x'); - ASSERT_EQ(kMegabyte, base::WriteFile(temp_path, data.c_str(), data.size())); + ASSERT_TRUE(base::WriteFile(temp_path, data)); // Validate that the counters move up. EXPECT_GT(metrics->GetDiskUsageBytesPerSecond(), 0U); diff --git a/chromium/base/profiler/metadata_recorder.cc b/chromium/base/profiler/metadata_recorder.cc index fd171d4dd6e..a6e3cc22dde 100644 --- a/chromium/base/profiler/metadata_recorder.cc +++ b/chromium/base/profiler/metadata_recorder.cc @@ -8,6 +8,20 @@ namespace base { +const size_t MetadataRecorder::MAX_METADATA_COUNT; + +MetadataRecorder::Item::Item(uint64_t name_hash, + Optional<int64_t> key, + int64_t value) + : name_hash(name_hash), key(key), value(value) {} + +MetadataRecorder::Item::Item() : name_hash(0), value(0) {} + +MetadataRecorder::Item::Item(const Item& other) = default; + +MetadataRecorder::Item& MetadataRecorder::Item::Item::operator=( + const Item& other) = default; + MetadataRecorder::ItemInternal::ItemInternal() = default; MetadataRecorder::ItemInternal::~ItemInternal() = default; @@ -90,30 +104,22 @@ void MetadataRecorder::Remove(uint64_t name_hash, Optional<int64_t> key) { } } -MetadataRecorder::ScopedGetItems::ScopedGetItems( +MetadataRecorder::MetadataProvider::MetadataProvider( MetadataRecorder* metadata_recorder) : metadata_recorder_(metadata_recorder), - auto_lock_(&metadata_recorder->read_lock_) {} - -MetadataRecorder::ScopedGetItems::~ScopedGetItems() {} - -// This function is marked as NO_THREAD_SAFETY_ANALYSIS because the analyzer -// doesn't understand that the lock is acquired in the constructor initializer -// list and can therefore be safely released here. -size_t MetadataRecorder::ScopedGetItems::GetItems( - ProfileBuilder::MetadataItemArray* const items) NO_THREAD_SAFETY_ANALYSIS { - size_t item_count = metadata_recorder_->GetItems(items); - auto_lock_.Release(); - return item_count; -} + auto_lock_(metadata_recorder->read_lock_) {} + +MetadataRecorder::MetadataProvider::~MetadataProvider() = default; -std::unique_ptr<ProfileBuilder::MetadataProvider> -MetadataRecorder::CreateMetadataProvider() { - return std::make_unique<MetadataRecorder::ScopedGetItems>(this); +size_t MetadataRecorder::MetadataProvider::GetItems( + ItemArray* const items) const { + // Assertion is only necessary so that thread annotations recognize that + // |read_lock_| is acquired. + metadata_recorder_->read_lock_.AssertAcquired(); + return metadata_recorder_->GetItems(items); } -size_t MetadataRecorder::GetItems( - ProfileBuilder::MetadataItemArray* const items) const { +size_t MetadataRecorder::GetItems(ItemArray* const items) const { read_lock_.AssertAcquired(); // If a writer adds a new item after this load, it will be ignored. We do @@ -134,7 +140,7 @@ size_t MetadataRecorder::GetItems( // Because we wait until |is_active| is set to consider an item active and // that field is always set last, we ignore half-created items. 
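    // This acquire load is intended to pair with the release (or stronger)
    // store that publishes |is_active| when an item is added in Set(), so a
    // reader that observes is_active == true also observes the item's
    // name_hash, key and value written before publication.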
if (item.is_active.load(std::memory_order_acquire)) { - (*items)[write_index++] = ProfileBuilder::MetadataItem{ + (*items)[write_index++] = Item{ item.name_hash, item.key, item.value.load(std::memory_order_relaxed)}; } } @@ -143,8 +149,7 @@ size_t MetadataRecorder::GetItems( } size_t MetadataRecorder::TryReclaimInactiveSlots(size_t item_slots_used) { - const size_t remaining_slots = - ProfileBuilder::MAX_METADATA_COUNT - item_slots_used; + const size_t remaining_slots = MAX_METADATA_COUNT - item_slots_used; if (inactive_item_count_ == 0 || inactive_item_count_ < remaining_slots) { // This reclaiming threshold has a few nice properties: diff --git a/chromium/base/profiler/metadata_recorder.h b/chromium/base/profiler/metadata_recorder.h index d85a3d978c4..6e120dd40b7 100644 --- a/chromium/base/profiler/metadata_recorder.h +++ b/chromium/base/profiler/metadata_recorder.h @@ -10,7 +10,6 @@ #include <utility> #include "base/optional.h" -#include "base/profiler/profile_builder.h" #include "base/synchronization/lock.h" #include "base/thread_annotations.h" @@ -54,11 +53,11 @@ namespace base { // allows readers to preallocate the data structure that we pass back // the metadata in. // -// C) We shouldn't guard writes with a lock that also guards reads. It can take -// ~30us from the time that the sampling thread requests that a thread be -// suspended and the time that it actually happens. If all metadata writes -// block their thread during that time, we're very likely to block all Chrome -// threads for an additional 30us per sample. +// C) We shouldn't guard writes with a lock that also guards reads, since the +// read lock is held from the time that the sampling thread requests that a +// thread be suspended up to the time that the thread is resumed. If all +// metadata writes block their thread during that time, we're very likely to +// block all Chrome threads. // // Ramifications: // @@ -94,8 +93,8 @@ namespace base { // // - No thread is using the recorder. // -// - A single writer is writing into the recorder without a simultaneous -// read. The write will succeed. +// - A single writer is writing into the recorder without a simultaneous read. +// The write will succeed. // // - A reader is reading from the recorder without a simultaneous write. The // read will succeed. @@ -128,6 +127,23 @@ class BASE_EXPORT MetadataRecorder { MetadataRecorder(const MetadataRecorder&) = delete; MetadataRecorder& operator=(const MetadataRecorder&) = delete; + struct BASE_EXPORT Item { + Item(uint64_t name_hash, Optional<int64_t> key, int64_t value); + Item(); + + Item(const Item& other); + Item& operator=(const Item& other); + + // The hash of the metadata name, as produced by HashMetricName(). + uint64_t name_hash; + // The key if specified when setting the item. + Optional<int64_t> key; + // The value of the metadata item. + int64_t value; + }; + static constexpr size_t MAX_METADATA_COUNT = 50; + typedef std::array<Item, MAX_METADATA_COUNT> ItemArray; + // Sets a value for a (|name_hash|, |key|) pair, overwriting any value // previously set for the pair. Nullopt keys are treated as just another key // state for the purpose of associating values. @@ -137,62 +153,49 @@ class BASE_EXPORT MetadataRecorder { // effect if such an item does not exist. void Remove(uint64_t name_hash, Optional<int64_t> key); - // Creates a MetadataProvider object for the recorder, which acquires the - // necessary exclusive read lock and provides access to the recorder's items - // via its GetItems() function. 
Reclaiming of inactive slots in the recorder - // can't occur while this object lives, so it should be created as soon before - // it's needed as possible. Calling GetItems() releases the lock held by the - // object and can therefore only be called once during the object's lifetime. + // An object that provides access to a MetadataRecorder's items and holds the + // necessary exclusive read lock until the object is destroyed. Reclaiming of + // inactive slots in the recorder can't occur while this object lives, so it + // should be created as soon before it's needed as possible and released as + // soon as possible. // - // This object should be created *before* suspending the target - // thread. Otherwise, that thread might be suspended while reclaiming inactive - // slots and holding the read lock, which would cause the sampling thread to - // deadlock. + // This object should be created *before* suspending the target thread and + // destroyed after resuming the target thread. Otherwise, that thread might be + // suspended while reclaiming inactive slots and holding the read lock, which + // would cause the sampling thread to deadlock. // // Example usage: // // MetadataRecorder r; - // base::ProfileBuilder::MetadataItemArray arr; + // base::MetadataRecorder::ItemArray arr; // size_t item_count; // ... // { - // auto get_items = r.CreateMetadataProvider(); - // item_count = get_items.GetItems(arr); + // MetadataRecorder::MetadataProvider provider(&r); + // item_count = provider.GetItems(&arr); // } - std::unique_ptr<ProfileBuilder::MetadataProvider> CreateMetadataProvider(); - - private: - // An object that provides access to a MetadataRecorder's items and holds the - // necessary exclusive read lock until either GetItems() is called or the - // object is destroyed. - // - // For usage and more details, see CreateMetadataProvider(). - class SCOPED_LOCKABLE ScopedGetItems - : public ProfileBuilder::MetadataProvider { + class SCOPED_LOCKABLE BASE_EXPORT MetadataProvider { public: // Acquires an exclusive read lock on the metadata recorder which is held - // until either GetItems() is called or the object is destroyed. - ScopedGetItems(MetadataRecorder* metadata_recorder) - EXCLUSIVE_LOCK_FUNCTION(metadata_recorder->read_lock_); - ~ScopedGetItems() override UNLOCK_FUNCTION(metadata_recorder_->read_lock_); - ScopedGetItems(const ScopedGetItems&) = delete; - ScopedGetItems& operator=(const ScopedGetItems&) = delete; + // until the object is destroyed. + explicit MetadataProvider(MetadataRecorder* metadata_recorder) + EXCLUSIVE_LOCK_FUNCTION(metadata_recorder_->read_lock_) + ~MetadataProvider() UNLOCK_FUNCTION(); + MetadataProvider(const MetadataProvider&) = delete; + MetadataProvider& operator=(const MetadataProvider&) = delete; // Retrieves the first |available_slots| items in the metadata recorder and // copies them into |items|, returning the number of metadata items that // were copied. To ensure that all items can be copied, |available slots| // should be greater than or equal to |MAX_METADATA_COUNT|. - // - // This function releases the lock held by the object and can therefore only - // be called once during the object's lifetime.
- size_t GetItems(ProfileBuilder::MetadataItemArray* const items) override - EXCLUSIVE_LOCKS_REQUIRED(metadata_recorder_->read_lock_); + size_t GetItems(ItemArray* const items) const; private: const MetadataRecorder* const metadata_recorder_; - base::ReleasableAutoLock auto_lock_; + base::AutoLock auto_lock_; }; + private: // TODO(charliea): Support large quantities of metadata efficiently. struct ItemInternal { ItemInternal(); @@ -228,17 +231,14 @@ class BASE_EXPORT MetadataRecorder { // after the reclamation. size_t TryReclaimInactiveSlots(size_t item_slots_used) EXCLUSIVE_LOCKS_REQUIRED(write_lock_) LOCKS_EXCLUDED(read_lock_); - // Also protected by read_lock_, but current thread annotation limitations - // prevent us from using thread annotations with locks acquired through - // Lock::Try(). Updates item_slots_used_ to reflect the new item count and - // returns the number of item slots used after the reclamation. + // Updates item_slots_used_ to reflect the new item count and returns the + // number of item slots used after the reclamation. size_t ReclaimInactiveSlots(size_t item_slots_used) - EXCLUSIVE_LOCKS_REQUIRED(write_lock_); + EXCLUSIVE_LOCKS_REQUIRED(write_lock_) + EXCLUSIVE_LOCKS_REQUIRED(read_lock_); - // Protected by read_lock_, but current thread annotation limitations - // prevent us from using thread annotations with locks acquired through - // Lock::Try(). - size_t GetItems(ProfileBuilder::MetadataItemArray* const items) const; + size_t GetItems(ItemArray* const items) const + EXCLUSIVE_LOCKS_REQUIRED(read_lock_); // Metadata items that the recorder has seen. Rather than implementing the // metadata recorder as a dense array, we implement it as a sparse array where @@ -248,7 +248,7 @@ class BASE_EXPORT MetadataRecorder { // // For the rationale behind this design (along with others considered), see // https://docs.google.com/document/d/18shLhVwuFbLl_jKZxCmOfRB98FmNHdKl0yZZZ3aEO4U/edit#. - std::array<ItemInternal, ProfileBuilder::MAX_METADATA_COUNT> items_; + std::array<ItemInternal, MAX_METADATA_COUNT> items_; // The number of item slots used in the metadata map. // @@ -267,11 +267,6 @@ class BASE_EXPORT MetadataRecorder { // A lock that guards against a reader trying to read items_ while inactive // slots are being reclaimed. - // - // Note that we can't enforce that this lock is properly acquired through - // thread annotations because thread annotations doesn't understand that - // ScopedGetItems::GetItems() can only be called between ScopedGetItems's - // constructor and destructor. 
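A hypothetical end-to-end sketch of the sampling sequence described in the MetadataProvider comments above (SuspendThread, CopyStack and ResumeThread are illustrative stand-ins, not base APIs): the provider is created before the target thread is suspended and destroyed only after it resumes, so slot reclamation can never be holding read_lock_ while that thread is stopped.

    MetadataRecorder recorder;
    MetadataRecorder::ItemArray items;
    size_t count = 0;
    {
      MetadataRecorder::MetadataProvider provider(&recorder);
      SuspendThread(target);              // illustrative
      count = provider.GetItems(&items);
      CopyStack(target);                  // illustrative
      ResumeThread(target);               // illustrative
    }  // read_lock_ is released here, after the target thread is running again.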
base::Lock read_lock_; }; diff --git a/chromium/base/profiler/metadata_recorder_unittest.cc b/chromium/base/profiler/metadata_recorder_unittest.cc index 79abb21a0de..4aff812a044 100644 --- a/chromium/base/profiler/metadata_recorder_unittest.cc +++ b/chromium/base/profiler/metadata_recorder_unittest.cc @@ -11,21 +11,22 @@ namespace base { -bool operator==(const ProfileBuilder::MetadataItem& lhs, - const ProfileBuilder::MetadataItem& rhs) { +bool operator==(const MetadataRecorder::Item& lhs, + const MetadataRecorder::Item& rhs) { return lhs.name_hash == rhs.name_hash && lhs.value == rhs.value; } -bool operator<(const ProfileBuilder::MetadataItem& lhs, - const ProfileBuilder::MetadataItem& rhs) { +bool operator<(const MetadataRecorder::Item& lhs, + const MetadataRecorder::Item& rhs) { return lhs.name_hash < rhs.name_hash; } TEST(MetadataRecorderTest, GetItems_Empty) { MetadataRecorder recorder; - ProfileBuilder::MetadataItemArray items; + MetadataRecorder::ItemArray items; - size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items); + size_t item_count = + MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); EXPECT_EQ(0u, item_count); } @@ -35,10 +36,10 @@ TEST(MetadataRecorderTest, Set_NewNameHash) { recorder.Set(10, nullopt, 20); - ProfileBuilder::MetadataItemArray items; + MetadataRecorder::ItemArray items; size_t item_count; { - item_count = recorder.CreateMetadataProvider()->GetItems(&items); + item_count = MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(1u, item_count); EXPECT_EQ(10u, items[0].name_hash); EXPECT_FALSE(items[0].key.has_value()); @@ -48,7 +49,7 @@ TEST(MetadataRecorderTest, Set_NewNameHash) { recorder.Set(20, nullopt, 30); { - item_count = recorder.CreateMetadataProvider()->GetItems(&items); + item_count = MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(2u, item_count); EXPECT_EQ(20u, items[1].name_hash); EXPECT_FALSE(items[1].key.has_value()); @@ -61,8 +62,9 @@ TEST(MetadataRecorderTest, Set_ExistingNameNash) { recorder.Set(10, nullopt, 20); recorder.Set(10, nullopt, 30); - ProfileBuilder::MetadataItemArray items; - size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items); + MetadataRecorder::ItemArray items; + size_t item_count = + MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(1u, item_count); EXPECT_EQ(10u, items[0].name_hash); EXPECT_FALSE(items[0].key.has_value()); @@ -71,10 +73,10 @@ TEST(MetadataRecorderTest, Set_ExistingNameNash) { TEST(MetadataRecorderTest, Set_ReAddRemovedNameNash) { MetadataRecorder recorder; - ProfileBuilder::MetadataItemArray items; - std::vector<ProfileBuilder::MetadataItem> expected; + MetadataRecorder::ItemArray items; + std::vector<MetadataRecorder::Item> expected; for (size_t i = 0; i < items.size(); ++i) { - expected.push_back(ProfileBuilder::MetadataItem{i, nullopt, 0}); + expected.push_back(MetadataRecorder::Item{i, nullopt, 0}); recorder.Set(i, nullopt, 0); } @@ -85,14 +87,15 @@ TEST(MetadataRecorderTest, Set_ReAddRemovedNameNash) { recorder.Remove(3, nullopt); recorder.Set(3, nullopt, 0); - size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items); + size_t item_count = + MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); EXPECT_EQ(items.size(), item_count); EXPECT_THAT(expected, ::testing::UnorderedElementsAreArray(items)); } TEST(MetadataRecorderTest, Set_AddPastMaxCount) { MetadataRecorder recorder; - ProfileBuilder::MetadataItemArray items; + MetadataRecorder::ItemArray items; for 
(size_t i = 0; i < items.size(); ++i) { recorder.Set(i, nullopt, 0); } @@ -106,10 +109,10 @@ TEST(MetadataRecorderTest, Set_NulloptKeyIsIndependentOfNonNulloptKey) { recorder.Set(10, 100, 20); - ProfileBuilder::MetadataItemArray items; + MetadataRecorder::ItemArray items; size_t item_count; { - item_count = recorder.CreateMetadataProvider()->GetItems(&items); + item_count = MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(1u, item_count); EXPECT_EQ(10u, items[0].name_hash); ASSERT_TRUE(items[0].key.has_value()); @@ -120,7 +123,7 @@ TEST(MetadataRecorderTest, Set_NulloptKeyIsIndependentOfNonNulloptKey) { recorder.Set(10, nullopt, 30); { - item_count = recorder.CreateMetadataProvider()->GetItems(&items); + item_count = MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(2u, item_count); EXPECT_EQ(10u, items[0].name_hash); @@ -141,8 +144,9 @@ TEST(MetadataRecorderTest, Remove) { recorder.Set(50, nullopt, 60); recorder.Remove(30, nullopt); - ProfileBuilder::MetadataItemArray items; - size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items); + MetadataRecorder::ItemArray items; + size_t item_count = + MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(2u, item_count); EXPECT_EQ(10u, items[0].name_hash); EXPECT_FALSE(items[0].key.has_value()); @@ -157,8 +161,9 @@ TEST(MetadataRecorderTest, Remove_DoesntExist) { recorder.Set(10, nullopt, 20); recorder.Remove(20, nullopt); - ProfileBuilder::MetadataItemArray items; - size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items); + MetadataRecorder::ItemArray items; + size_t item_count = + MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(1u, item_count); EXPECT_EQ(10u, items[0].name_hash); EXPECT_FALSE(items[0].key.has_value()); @@ -173,8 +178,9 @@ TEST(MetadataRecorderTest, Remove_NulloptKeyIsIndependentOfNonNulloptKey) { recorder.Remove(10, nullopt); - ProfileBuilder::MetadataItemArray items; - size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items); + MetadataRecorder::ItemArray items; + size_t item_count = + MetadataRecorder::MetadataProvider(&recorder).GetItems(&items); ASSERT_EQ(1u, item_count); EXPECT_EQ(10u, items[0].name_hash); ASSERT_TRUE(items[0].key.has_value()); @@ -185,34 +191,34 @@ TEST(MetadataRecorderTest, Remove_NulloptKeyIsIndependentOfNonNulloptKey) { TEST(MetadataRecorderTest, ReclaimInactiveSlots) { MetadataRecorder recorder; - std::set<ProfileBuilder::MetadataItem> items_set; + std::set<MetadataRecorder::Item> items_set; // Fill up the metadata map. - for (size_t i = 0; i < ProfileBuilder::MAX_METADATA_COUNT; ++i) { + for (size_t i = 0; i < MetadataRecorder::MAX_METADATA_COUNT; ++i) { recorder.Set(i, nullopt, i); - items_set.insert(ProfileBuilder::MetadataItem{i, nullopt, i}); + items_set.insert(MetadataRecorder::Item{i, nullopt, i}); } // Remove every fourth entry to fragment the data. size_t entries_removed = 0; - for (size_t i = 3; i < ProfileBuilder::MAX_METADATA_COUNT; i += 4) { + for (size_t i = 3; i < MetadataRecorder::MAX_METADATA_COUNT; i += 4) { recorder.Remove(i, nullopt); ++entries_removed; - items_set.erase(ProfileBuilder::MetadataItem{i, nullopt, i}); + items_set.erase(MetadataRecorder::Item{i, nullopt, i}); } // Ensure that the inactive slots are reclaimed to make room for more entries. 
for (size_t i = 1; i <= entries_removed; ++i) { recorder.Set(i * 100, nullopt, i * 100); - items_set.insert(ProfileBuilder::MetadataItem{i * 100, nullopt, i * 100}); + items_set.insert(MetadataRecorder::Item{i * 100, nullopt, i * 100}); } - ProfileBuilder::MetadataItemArray items_arr; + MetadataRecorder::ItemArray items_arr; std::copy(items_set.begin(), items_set.end(), items_arr.begin()); - ProfileBuilder::MetadataItemArray recorder_items; + MetadataRecorder::ItemArray recorder_items; size_t recorder_item_count = - recorder.CreateMetadataProvider()->GetItems(&recorder_items); - EXPECT_EQ(recorder_item_count, ProfileBuilder::MAX_METADATA_COUNT); + MetadataRecorder::MetadataProvider(&recorder).GetItems(&recorder_items); + EXPECT_EQ(recorder_item_count, MetadataRecorder::MAX_METADATA_COUNT); EXPECT_THAT(recorder_items, ::testing::UnorderedElementsAreArray(items_arr)); } @@ -220,7 +226,7 @@ TEST(MetadataRecorderTest, MetadataSlotsUsedUmaHistogram) { MetadataRecorder recorder; HistogramTester histogram_tester; - for (size_t i = 0; i < ProfileBuilder::MAX_METADATA_COUNT; ++i) { + for (size_t i = 0; i < MetadataRecorder::MAX_METADATA_COUNT; ++i) { recorder.Set(i * 10, nullopt, i * 100); } diff --git a/chromium/base/profiler/module_cache_mac.cc b/chromium/base/profiler/module_cache_mac.cc index 2d895b7baef..30568aa2f8d 100644 --- a/chromium/base/profiler/module_cache_mac.cc +++ b/chromium/base/profiler/module_cache_mac.cc @@ -6,69 +6,108 @@ #include <dlfcn.h> #include <mach-o/getsect.h> +#include <string.h> #include <uuid/uuid.h> #include "base/strings/string_number_conversions.h" +#include "build/build_config.h" namespace base { namespace { -// Returns the unique build ID for a module loaded at |module_addr|. Returns the -// empty string if the function fails to get the build ID. +#if defined(ARCH_CPU_64_BITS) +using MachHeaderType = mach_header_64; +using SegmentCommandType = segment_command_64; +constexpr uint32_t kMachHeaderMagic = MH_MAGIC_64; +constexpr uint32_t kSegmentCommand = LC_SEGMENT_64; +#else +using MachHeaderType = mach_header; +using SegmentCommandType = segment_command; +constexpr uint32_t kMachHeaderMagic = MH_MAGIC; +constexpr uint32_t kSegmentCommand = LC_SEGMENT; +#endif + +// Returns the unique build ID and text segment size for a module loaded at +// |module_addr|. Returns the empty string and 0 if the function fails to get +// the build ID or size. // // Build IDs are created by the concatenation of the module's GUID (Windows) / // UUID (Mac) and an "age" field that indicates how many times that GUID/UUID // has been reused. In Windows binaries, the "age" field is present in the // module header, but on the Mac, UUIDs are never reused and so the "age" value // appended to the UUID is always 0. 
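// For example (illustrative UUID): if LC_UUID holds
// 1B2C3D4E-5F60-7182-93A4-B5C6D7E8F901, HexEncode() produces
// "1B2C3D4E5F60718293A4B5C6D7E8F901" and the returned id is that string with
// the constant age digit appended: "1B2C3D4E5F60718293A4B5C6D7E8F9010".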
-std::string GetUniqueId(const void* module_addr) { - const mach_header_64* mach_header = - reinterpret_cast<const mach_header_64*>(module_addr); - DCHECK_EQ(MH_MAGIC_64, mach_header->magic); - - size_t offset = sizeof(mach_header_64); - size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds; +void GetUniqueIdAndTextSize(const void* module_addr, + std::string* unique_id, + size_t* text_size) { + const MachHeaderType* mach_header = + reinterpret_cast<const MachHeaderType*>(module_addr); + DCHECK_EQ(mach_header->magic, kMachHeaderMagic); + + size_t offset = sizeof(MachHeaderType); + size_t offset_limit = sizeof(MachHeaderType) + mach_header->sizeofcmds; + bool found_uuid = false; + bool found_text_size = false; for (uint32_t i = 0; i < mach_header->ncmds; ++i) { - if (offset + sizeof(load_command) >= offset_limit) - return std::string(); + if (offset + sizeof(load_command) >= offset_limit) { + unique_id->clear(); + *text_size = 0; + return; + } - const load_command* current_cmd = reinterpret_cast<const load_command*>( + const load_command* load_cmd = reinterpret_cast<const load_command*>( reinterpret_cast<const uint8_t*>(mach_header) + offset); - if (offset + current_cmd->cmdsize > offset_limit) { + if (offset + load_cmd->cmdsize > offset_limit) { // This command runs off the end of the command list. This is malformed. - return std::string(); + unique_id->clear(); + *text_size = 0; + return; } - if (current_cmd->cmd == LC_UUID) { - if (current_cmd->cmdsize < sizeof(uuid_command)) { + if (load_cmd->cmd == LC_UUID) { + if (load_cmd->cmdsize < sizeof(uuid_command)) { // This "UUID command" is too small. This is malformed. - return std::string(); + unique_id->clear(); + } else { + const uuid_command* uuid_cmd = + reinterpret_cast<const uuid_command*>(load_cmd); + static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t), + "UUID field of UUID command should be 16 bytes."); + // The ID comprises the UUID concatenated with the Mac's "age" value + // which is always 0. + unique_id->assign(HexEncode(&uuid_cmd->uuid, sizeof(uuid_cmd->uuid)) + + "0"); } - - const uuid_command* uuid_cmd = - reinterpret_cast<const uuid_command*>(current_cmd); - static_assert(sizeof(uuid_cmd->uuid) == sizeof(uuid_t), - "UUID field of UUID command should be 16 bytes."); - // The ID is comprised of the UUID concatenated with the Mac's "age" value - // which is always 0. - return HexEncode(&uuid_cmd->uuid, sizeof(uuid_cmd->uuid)) + "0"; + if (found_text_size) + return; + found_uuid = true; + } else if (load_cmd->cmd == kSegmentCommand) { + const SegmentCommandType* segment_cmd = + reinterpret_cast<const SegmentCommandType*>(load_cmd); + if (strncmp(segment_cmd->segname, SEG_TEXT, + sizeof(segment_cmd->segname)) == 0) { + *text_size = segment_cmd->vmsize; + // Compare result with library function call, which is slower than this + // code. + unsigned long text_size_from_libmacho; + DCHECK(getsegmentdata(mach_header, SEG_TEXT, &text_size_from_libmacho)); + DCHECK_EQ(*text_size, text_size_from_libmacho); + } + if (found_uuid) + return; + found_text_size = true; } - offset += current_cmd->cmdsize; + offset += load_cmd->cmdsize; } - return std::string(); -} -// Returns the size of the _TEXT segment of the module loaded at |module_addr|. 
-size_t GetModuleTextSize(const void* module_addr) { - const mach_header_64* mach_header = - reinterpret_cast<const mach_header_64*>(module_addr); - DCHECK_EQ(MH_MAGIC_64, mach_header->magic); - unsigned long module_size; - getsegmentdata(mach_header, SEG_TEXT, &module_size); - return module_size; + if (!found_uuid) { + unique_id->clear(); + } + if (!found_text_size) { + *text_size = 0; + } } } // namespace @@ -77,9 +116,9 @@ class MacModule : public ModuleCache::Module { public: MacModule(const Dl_info& dl_info) : base_address_(reinterpret_cast<uintptr_t>(dl_info.dli_fbase)), - id_(GetUniqueId(dl_info.dli_fbase)), - debug_basename_(FilePath(dl_info.dli_fname).BaseName()), - size_(GetModuleTextSize(dl_info.dli_fbase)) {} + debug_basename_(FilePath(dl_info.dli_fname).BaseName()) { + GetUniqueIdAndTextSize(dl_info.dli_fbase, &id_, &size_); + } MacModule(const MacModule&) = delete; MacModule& operator=(const MacModule&) = delete; diff --git a/chromium/base/profiler/native_unwinder_android.cc b/chromium/base/profiler/native_unwinder_android.cc index d5f9cfeec4e..fa06494aee2 100644 --- a/chromium/base/profiler/native_unwinder_android.cc +++ b/chromium/base/profiler/native_unwinder_android.cc @@ -4,25 +4,226 @@ #include "base/profiler/native_unwinder_android.h" +#include <string> +#include <vector> + +#include <sys/mman.h> + +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Elf.h" +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Maps.h" +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h" +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Regs.h" + +#include "base/memory/ptr_util.h" #include "base/profiler/module_cache.h" #include "base/profiler/native_unwinder.h" #include "base/profiler/profile_builder.h" +#include "base/profiler/unwindstack_internal_android.h" +#include "build/build_config.h" + +#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/MachineArm.h" +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/RegsArm.h" +#elif defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_64_BITS) +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/MachineArm64.h" +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/RegsArm64.h" +#endif // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) namespace base { +namespace { + +class AndroidModule : public ModuleCache::Module { + public: + AndroidModule(unwindstack::MapInfo* map_info) + : start_(map_info->start), + size_(map_info->end - map_info->start), + build_id_(map_info->GetBuildID()), + name_(map_info->name) {} + ~AndroidModule() override = default; + + uintptr_t GetBaseAddress() const override { return start_; } + + std::string GetId() const override { return build_id_; } + + FilePath GetDebugBasename() const override { return FilePath(name_); } + + // Gets the size of the module. + size_t GetSize() const override { return size_; } + + // True if this is a native module. 
+ bool IsNative() const override { return true; } + + const uintptr_t start_; + const size_t size_; + const std::string build_id_; + const std::string name_; +}; + +std::unique_ptr<unwindstack::Regs> CreateFromRegisterContext( + RegisterContext* thread_context) { +#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) + return WrapUnique<unwindstack::Regs>(unwindstack::RegsArm::Read( + reinterpret_cast<void*>(&thread_context->arm_r0))); +#elif defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_64_BITS) + return WrapUnique<unwindstack::Regs>(unwindstack::RegsArm64::Read( + reinterpret_cast<void*>(&thread_context->regs[0]))); +#else // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) + NOTREACHED(); + return nullptr; +#endif // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) +} + +void CopyToRegisterContext(unwindstack::Regs* regs, + RegisterContext* thread_context) { +#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) + memcpy(reinterpret_cast<void*>(&thread_context->arm_r0), regs->RawData(), + unwindstack::ARM_REG_LAST * sizeof(uint32_t)); +#elif defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_64_BITS) + memcpy(reinterpret_cast<void*>(&thread_context->regs[0]), regs->RawData(), + unwindstack::ARM64_REG_LAST * sizeof(uint32_t)); +#else // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) + NOTREACHED(); +#endif // #if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) +} + +} // namespace + +// static +std::unique_ptr<unwindstack::Maps> NativeUnwinderAndroid::CreateMaps() { + auto maps = std::make_unique<unwindstack::LocalMaps>(); + if (maps->Parse()) + return maps; + return nullptr; +} + +// static +std::unique_ptr<unwindstack::Memory> +NativeUnwinderAndroid::CreateProcessMemory() { + return std::make_unique<unwindstack::MemoryLocal>(); +} + +void NativeUnwinderAndroid::AddInitialModulesFromMaps( + const unwindstack::Maps& memory_regions_map, + ModuleCache* module_cache) { + for (const auto& region : memory_regions_map) { + // Only add executable regions. 
+ if (!(region->flags & PROT_EXEC)) + continue; + module_cache->AddCustomNativeModule( + std::make_unique<AndroidModule>(region.get())); + } +} + +NativeUnwinderAndroid::NativeUnwinderAndroid( + unwindstack::Maps* memory_regions_map, + unwindstack::Memory* process_memory, + uintptr_t exclude_module_with_base_address) + : memory_regions_map_(memory_regions_map), + process_memory_(process_memory), + exclude_module_with_base_address_(exclude_module_with_base_address) {} + +NativeUnwinderAndroid::~NativeUnwinderAndroid() = default; + +void NativeUnwinderAndroid::AddInitialModules(ModuleCache* module_cache) { + AddInitialModulesFromMaps(*memory_regions_map_, module_cache); +} bool NativeUnwinderAndroid::CanUnwindFrom(const Frame& current_frame) const { - return false; + return current_frame.module && current_frame.module->IsNative() && + current_frame.module->GetBaseAddress() != + exclude_module_with_base_address_; } UnwindResult NativeUnwinderAndroid::TryUnwind(RegisterContext* thread_context, uintptr_t stack_top, ModuleCache* module_cache, std::vector<Frame>* stack) const { - return UnwindResult::ABORTED; + auto regs = CreateFromRegisterContext(thread_context); + DCHECK(regs); + unwindstack::ArchEnum arch = regs->Arch(); + + do { + uint64_t cur_pc = regs->pc(); + uint64_t cur_sp = regs->sp(); + unwindstack::MapInfo* map_info = memory_regions_map_->Find(cur_pc); + if (map_info == nullptr || + map_info->flags & unwindstack::MAPS_FLAGS_DEVICE_MAP) { + break; + } + + unwindstack::Elf* elf = + map_info->GetElf({process_memory_, [](unwindstack::Memory*) {}}, arch); + if (!elf->valid()) + break; + + UnwindStackMemoryAndroid stack_memory(cur_sp, stack_top); + uintptr_t rel_pc = elf->GetRelPc(cur_pc, map_info); + bool finished = false; + bool stepped = + elf->Step(rel_pc, rel_pc, regs.get(), &stack_memory, &finished); + if (stepped && finished) + return UnwindResult::COMPLETED; + + if (!stepped) { + // Stepping failed. Try unwinding using return address. + if (stack->size() == 1) { + if (!regs->SetPcFromReturnAddress(&stack_memory)) + return UnwindResult::ABORTED; + } else { + break; + } + } + + // If the pc and sp didn't change, then consider everything stopped. + if (cur_pc == regs->pc() && cur_sp == regs->sp()) + return UnwindResult::ABORTED; + + // Exclusive range of expected stack pointer values after the unwind. + struct { + uintptr_t start; + uintptr_t end; + } expected_stack_pointer_range = {cur_sp, stack_top}; + if (regs->sp() < expected_stack_pointer_range.start || + regs->sp() >= expected_stack_pointer_range.end) { + return UnwindResult::ABORTED; + } + + if (regs->dex_pc() != 0) { + // Add a frame to represent the dex file. + EmitDexFrame(regs->dex_pc(), module_cache, stack); + + // Clear the dex pc so that we don't repeat this frame later. + regs->set_dex_pc(0); + } + + // Add the frame to |stack|. + const ModuleCache::Module* module = + module_cache->GetModuleForAddress(regs->pc()); + stack->emplace_back(regs->pc(), module); + } while (CanUnwindFrom(stack->back())); + + // Restore registers necessary for further unwinding in |thread_context|. 
+ CopyToRegisterContext(regs.get(), thread_context); + return UnwindResult::UNRECOGNIZED_FRAME; } -std::unique_ptr<Unwinder> CreateNativeUnwinder(ModuleCache* module_cache) { - return std::make_unique<NativeUnwinderAndroid>(); +void NativeUnwinderAndroid::EmitDexFrame(uintptr_t dex_pc, + ModuleCache* module_cache, + std::vector<Frame>* stack) const { + const ModuleCache::Module* module = module_cache->GetModuleForAddress(dex_pc); + if (!module) { + // The region containing |dex_pc| may not be in |module_cache| since it's + // usually not executable (.dex file). Since non-executable regions + // are used much less commonly, it's lazily added here instead of from + // AddInitialModules(). + unwindstack::MapInfo* map_info = memory_regions_map_->Find(dex_pc); + if (map_info) { + auto new_module = std::make_unique<AndroidModule>(map_info); + module = new_module.get(); + module_cache->AddCustomNativeModule(std::move(new_module)); + } + } + stack->emplace_back(dex_pc, module); } } // namespace base diff --git a/chromium/base/profiler/native_unwinder_android.h b/chromium/base/profiler/native_unwinder_android.h index 16f1b7b39aa..926a581b32a 100644 --- a/chromium/base/profiler/native_unwinder_android.h +++ b/chromium/base/profiler/native_unwinder_android.h @@ -7,25 +7,54 @@ #include "base/profiler/unwinder.h" +namespace unwindstack { +class Maps; +class Memory; +} // namespace unwindstack + namespace base { // Native unwinder implementation for Android, using libunwindstack. -// -// TODO(charliea): Implement this class. -// See: https://crbug.com/989102 class NativeUnwinderAndroid : public Unwinder { public: - NativeUnwinderAndroid() = default; + // Creates maps object from /proc/self/maps for use by NativeUnwinderAndroid. + // Since this is an expensive call, the maps object should be re-used across + // all profiles in a process. + static std::unique_ptr<unwindstack::Maps> CreateMaps(); + static std::unique_ptr<unwindstack::Memory> CreateProcessMemory(); + // Adds modules found from executable loaded memory regions to |module_cache|. + static void AddInitialModulesFromMaps( + const unwindstack::Maps& memory_regions_map, + ModuleCache* module_cache); + + // |exclude_module_with_base_address| is used to exclude a specific module + // and let another unwinder take control. TryUnwind() will exit with + // UNRECOGNIZED_FRAME and CanUnwindFrom() will return false when a frame is + // encountered in that module. 
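// A minimal usage sketch of this constructor (not part of the patch; it
// mirrors the unit tests added below, and passing 0 means no module is
// excluded):
//   auto maps = NativeUnwinderAndroid::CreateMaps();
//   auto memory = NativeUnwinderAndroid::CreateProcessMemory();
//   NativeUnwinderAndroid unwinder(maps.get(), memory.get(),
//                                  /*exclude_module_with_base_address=*/0);
//   ModuleCache module_cache;
//   unwinder.AddInitialModules(&module_cache);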
+ NativeUnwinderAndroid(unwindstack::Maps* memory_regions_map, + unwindstack::Memory* process_memory, + uintptr_t exclude_module_with_base_address = 0); + ~NativeUnwinderAndroid() override; NativeUnwinderAndroid(const NativeUnwinderAndroid&) = delete; NativeUnwinderAndroid& operator=(const NativeUnwinderAndroid&) = delete; // Unwinder + void AddInitialModules(ModuleCache* module_cache) override; bool CanUnwindFrom(const Frame& current_frame) const override; UnwindResult TryUnwind(RegisterContext* thread_context, uintptr_t stack_top, ModuleCache* module_cache, std::vector<Frame>* stack) const override; + + private: + void EmitDexFrame(uintptr_t dex_pc, + ModuleCache* module_cache, + std::vector<Frame>* stack) const; + + unwindstack::Maps* const memory_regions_map_; + unwindstack::Memory* const process_memory_; + const uintptr_t exclude_module_with_base_address_; }; } // namespace base diff --git a/chromium/base/profiler/native_unwinder_android_unittest.cc b/chromium/base/profiler/native_unwinder_android_unittest.cc new file mode 100644 index 00000000000..236ab3ae6fc --- /dev/null +++ b/chromium/base/profiler/native_unwinder_android_unittest.cc @@ -0,0 +1,365 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/profiler/native_unwinder_android.h" + +#include <string.h> + +#include "base/android/build_info.h" +#include "base/android/jni_android.h" +#include "base/base_profiler_test_support_jni_headers/TestSupport_jni.h" +#include "base/bind.h" +#include "base/profiler/register_context.h" +#include "base/profiler/stack_buffer.h" +#include "base/profiler/stack_copier_signal.h" +#include "base/profiler/stack_sampling_profiler_test_util.h" +#include "base/profiler/thread_delegate_posix.h" +#include "base/profiler/unwindstack_internal_android.h" +#include "base/test/bind_test_util.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +extern char __executable_start; + +namespace base { + +class TestStackCopierDelegate : public StackCopier::Delegate { + public: + void OnStackCopy() override {} +}; + +std::vector<Frame> CaptureScenario( + UnwindScenario* scenario, + ModuleCache* module_cache, + OnceCallback<void(RegisterContext*, uintptr_t, std::vector<Frame>*)> + unwind_callback) { + std::vector<Frame> sample; + WithTargetThread( + scenario, + BindLambdaForTesting( + [&](SamplingProfilerThreadToken target_thread_token) { + auto stack_copier = std::make_unique<StackCopierSignal>( + std::make_unique<ThreadDelegatePosix>(target_thread_token)); + std::unique_ptr<StackBuffer> stack_buffer = + StackSampler::CreateStackBuffer(); + + RegisterContext thread_context; + uintptr_t stack_top; + TimeTicks timestamp; + TestStackCopierDelegate delegate; + bool success = + stack_copier->CopyStack(stack_buffer.get(), &stack_top, + &timestamp, &thread_context, &delegate); + ASSERT_TRUE(success); + + sample.emplace_back( + RegisterContextInstructionPointer(&thread_context), + module_cache->GetModuleForAddress( + RegisterContextInstructionPointer(&thread_context))); + + std::move(unwind_callback).Run(&thread_context, stack_top, &sample); + })); + + return sample; +} + +// Checks that the expected information is present in sampled frames.
+TEST(NativeUnwinderAndroidTest, PlainFunction) { + UnwindScenario scenario(BindRepeating(&CallWithPlainFunction)); + + std::unique_ptr<unwindstack::Maps> maps = NativeUnwinderAndroid::CreateMaps(); + std::unique_ptr<unwindstack::Memory> memory = + NativeUnwinderAndroid::CreateProcessMemory(); + auto unwinder = + std::make_unique<NativeUnwinderAndroid>(maps.get(), memory.get(), 0); + + ModuleCache module_cache; + unwinder->AddInitialModules(&module_cache); + std::vector<Frame> sample = + CaptureScenario(&scenario, &module_cache, + BindLambdaForTesting([&](RegisterContext* thread_context, + uintptr_t stack_top, + std::vector<Frame>* sample) { + ASSERT_TRUE(unwinder->CanUnwindFrom(sample->back())); + UnwindResult result = unwinder->TryUnwind( + thread_context, stack_top, &module_cache, sample); + EXPECT_EQ(UnwindResult::COMPLETED, result); + })); + + // Check that all the modules are valid. + for (const auto& frame : sample) + EXPECT_NE(nullptr, frame.module); + + // The stack should contain a full unwind. + ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(), + scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); +} + +// Checks that the unwinder handles stacks containing dynamically-allocated +// stack memory. +TEST(NativeUnwinderAndroidTest, Alloca) { + UnwindScenario scenario(BindRepeating(&CallWithAlloca)); + + std::unique_ptr<unwindstack::Maps> maps = NativeUnwinderAndroid::CreateMaps(); + std::unique_ptr<unwindstack::Memory> memory = + NativeUnwinderAndroid::CreateProcessMemory(); + auto unwinder = + std::make_unique<NativeUnwinderAndroid>(maps.get(), memory.get(), 0); + + ModuleCache module_cache; + unwinder->AddInitialModules(&module_cache); + std::vector<Frame> sample = + CaptureScenario(&scenario, &module_cache, + BindLambdaForTesting([&](RegisterContext* thread_context, + uintptr_t stack_top, + std::vector<Frame>* sample) { + ASSERT_TRUE(unwinder->CanUnwindFrom(sample->back())); + UnwindResult result = unwinder->TryUnwind( + thread_context, stack_top, &module_cache, sample); + EXPECT_EQ(UnwindResult::COMPLETED, result); + })); + + // Check that all the modules are valid. + for (const auto& frame : sample) + EXPECT_NE(nullptr, frame.module); + + // The stack should contain a full unwind. + ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(), + scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); +} + +// Checks that a stack that runs through another library produces a stack with +// the expected functions. +TEST(NativeUnwinderAndroidTest, OtherLibrary) { + NativeLibrary other_library = LoadOtherLibrary(); + UnwindScenario scenario( + BindRepeating(&CallThroughOtherLibrary, Unretained(other_library))); + + std::unique_ptr<unwindstack::Maps> maps = NativeUnwinderAndroid::CreateMaps(); + std::unique_ptr<unwindstack::Memory> memory = + NativeUnwinderAndroid::CreateProcessMemory(); + auto unwinder = + std::make_unique<NativeUnwinderAndroid>(maps.get(), memory.get(), 0); + + ModuleCache module_cache; + unwinder->AddInitialModules(&module_cache); + std::vector<Frame> sample = + CaptureScenario(&scenario, &module_cache, + BindLambdaForTesting([&](RegisterContext* thread_context, + uintptr_t stack_top, + std::vector<Frame>* sample) { + ASSERT_TRUE(unwinder->CanUnwindFrom(sample->back())); + UnwindResult result = unwinder->TryUnwind( + thread_context, stack_top, &module_cache, sample); + EXPECT_EQ(UnwindResult::COMPLETED, result); + })); + + // The stack should contain a full unwind. 
+ ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(), + scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); +} + +// Check that unwinding is interrupted for excluded modules. +TEST(NativeUnwinderAndroidTest, ExcludeOtherLibrary) { + NativeLibrary other_library = LoadOtherLibrary(); + UnwindScenario scenario( + BindRepeating(&CallThroughOtherLibrary, Unretained(other_library))); + + std::unique_ptr<unwindstack::Maps> maps = NativeUnwinderAndroid::CreateMaps(); + std::unique_ptr<unwindstack::Memory> memory = + NativeUnwinderAndroid::CreateProcessMemory(); + ModuleCache module_cache; + NativeUnwinderAndroid::AddInitialModulesFromMaps(*maps, &module_cache); + + auto unwinder = std::make_unique<NativeUnwinderAndroid>( + maps.get(), memory.get(), + module_cache.GetModuleForAddress(GetAddressInOtherLibrary(other_library)) + ->GetBaseAddress()); + std::vector<Frame> sample = + CaptureScenario(&scenario, &module_cache, + BindLambdaForTesting([&](RegisterContext* thread_context, + uintptr_t stack_top, + std::vector<Frame>* sample) { + ASSERT_TRUE(unwinder->CanUnwindFrom(sample->back())); + EXPECT_EQ(UnwindResult::UNRECOGNIZED_FRAME, + unwinder->TryUnwind(thread_context, stack_top, + &module_cache, sample)); + EXPECT_FALSE(unwinder->CanUnwindFrom(sample->back())); + })); + + ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange()}); + ExpectStackDoesNotContain(sample, {scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); +} + +// Check that unwinding can be resumed after an incomplete unwind. +TEST(NativeUnwinderAndroidTest, ResumeUnwinding) { + NativeLibrary other_library = LoadOtherLibrary(); + UnwindScenario scenario( + BindRepeating(&CallThroughOtherLibrary, Unretained(other_library))); + + std::unique_ptr<unwindstack::Maps> maps = NativeUnwinderAndroid::CreateMaps(); + std::unique_ptr<unwindstack::Memory> memory = + NativeUnwinderAndroid::CreateProcessMemory(); + ModuleCache module_cache; + NativeUnwinderAndroid::AddInitialModulesFromMaps(*maps, &module_cache); + + // Several unwinders are used to unwind different portion of the stack. This + // tests that NativeUnwinderAndroid can pick up from a state in the middle of + // the stack. This emulates having NativeUnwinderAndroid work with other + // unwinders, but doesn't reproduce what happens in production. + auto unwinder_for_all = + std::make_unique<NativeUnwinderAndroid>(maps.get(), memory.get(), 0); + auto unwinder_for_native = std::make_unique<NativeUnwinderAndroid>( + maps.get(), memory.get(), + reinterpret_cast<uintptr_t>(&__executable_start)); + auto unwinder_for_chrome = std::make_unique<NativeUnwinderAndroid>( + maps.get(), memory.get(), + module_cache.GetModuleForAddress(GetAddressInOtherLibrary(other_library)) + ->GetBaseAddress()); + + std::vector<Frame> sample = CaptureScenario( + &scenario, &module_cache, + BindLambdaForTesting([&](RegisterContext* thread_context, + uintptr_t stack_top, + std::vector<Frame>* sample) { + // |unwinder_for_native| unwinds through native frames, but stops at + // chrome frames. It might not contain SampleAddressRange. 
+ ASSERT_TRUE(unwinder_for_native->CanUnwindFrom(sample->back())); + EXPECT_EQ(UnwindResult::UNRECOGNIZED_FRAME, + unwinder_for_native->TryUnwind(thread_context, stack_top, + &module_cache, sample)); + EXPECT_FALSE(unwinder_for_native->CanUnwindFrom(sample->back())); + + ExpectStackDoesNotContain(*sample, + {scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); + size_t prior_stack_size = sample->size(); + + // |unwinder_for_chrome| unwinds through Chrome frames, but stops at + // |other_library|. It won't contain SetupFunctionAddressRange. + ASSERT_TRUE(unwinder_for_chrome->CanUnwindFrom(sample->back())); + EXPECT_EQ(UnwindResult::UNRECOGNIZED_FRAME, + unwinder_for_chrome->TryUnwind(thread_context, stack_top, + &module_cache, sample)); + EXPECT_FALSE(unwinder_for_chrome->CanUnwindFrom(sample->back())); + EXPECT_LT(prior_stack_size, sample->size()); + ExpectStackContains(*sample, {scenario.GetWaitForSampleAddressRange()}); + ExpectStackDoesNotContain(*sample, + {scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); + + // |unwinder_for_all| should complete unwinding through all frames. + ASSERT_TRUE(unwinder_for_all->CanUnwindFrom(sample->back())); + EXPECT_EQ(UnwindResult::COMPLETED, + unwinder_for_all->TryUnwind(thread_context, stack_top, + &module_cache, sample)); + })); + + // The stack should contain a full unwind. + ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(), + scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); +} + +struct JavaTestSupportParams { + OnceClosure wait_for_sample; + FunctionAddressRange range; +}; + +void JNI_TestSupport_InvokeCallbackFunction(JNIEnv* env, jlong context) { + const void* start_program_counter = GetProgramCounter(); + + JavaTestSupportParams* params = + reinterpret_cast<JavaTestSupportParams*>(context); + if (!params->wait_for_sample.is_null()) + std::move(params->wait_for_sample).Run(); + + // Volatile to prevent a tail call to GetProgramCounter(). + const void* volatile end_program_counter = GetProgramCounter(); + + params->range = {start_program_counter, end_program_counter}; +} + +// Checks that java frames can be unwound through. +// Disabled, see: https://crbug.com/1076997 +TEST(NativeUnwinderAndroidTest, DISABLED_JavaFunction) { + auto* build_info = base::android::BuildInfo::GetInstance(); + // Due to varying availability of compiled java unwind tables, unwinding is + // only expected to succeed on > SDK_VERSION_MARSHMALLOW. 
+ bool can_always_unwind = + build_info->sdk_int() > base::android::SDK_VERSION_MARSHMALLOW; + + UnwindScenario scenario(BindLambdaForTesting([](OnceClosure wait_for_sample) { + JNIEnv* env = base::android::AttachCurrentThread(); + JavaTestSupportParams params{std::move(wait_for_sample), {}}; + base::Java_TestSupport_callWithJavaFunction( + env, reinterpret_cast<uintptr_t>(&params)); + return params.range; + })); + + std::unique_ptr<unwindstack::Maps> maps = NativeUnwinderAndroid::CreateMaps(); + std::unique_ptr<unwindstack::Memory> memory = + NativeUnwinderAndroid::CreateProcessMemory(); + auto unwinder = + std::make_unique<NativeUnwinderAndroid>(maps.get(), memory.get(), 0); + + ModuleCache module_cache; + unwinder->AddInitialModules(&module_cache); + std::vector<Frame> sample = + CaptureScenario(&scenario, &module_cache, + BindLambdaForTesting([&](RegisterContext* thread_context, + uintptr_t stack_top, + std::vector<Frame>* sample) { + ASSERT_TRUE(unwinder->CanUnwindFrom(sample->back())); + UnwindResult result = unwinder->TryUnwind( + thread_context, stack_top, &module_cache, sample); + if (can_always_unwind) + EXPECT_EQ(UnwindResult::COMPLETED, result); + })); + + // Check that all the modules are valid. + for (const auto& frame : sample) + EXPECT_NE(nullptr, frame.module); + + // The stack should contain a full unwind. + if (can_always_unwind) { + ExpectStackContains(sample, {scenario.GetWaitForSampleAddressRange(), + scenario.GetSetupFunctionAddressRange(), + scenario.GetOuterFunctionAddressRange()}); + } +} + +TEST(NativeUnwinderAndroidTest, UnwindStackMemoryTest) { + std::vector<uint8_t> input = {1, 2, 3, 4, 5}; + uintptr_t begin = reinterpret_cast<uintptr_t>(input.data()); + uintptr_t end = reinterpret_cast<uintptr_t>(input.data() + input.size()); + UnwindStackMemoryAndroid memory(begin, end); + + const auto check_read_fails = [&](uintptr_t addr, size_t size) { + std::vector<uint8_t> output(size); + EXPECT_EQ(0U, memory.Read(addr, output.data(), size)); + }; + const auto check_read_succeeds = [&](uintptr_t addr, size_t size) { + std::vector<uint8_t> output(size); + EXPECT_EQ(size, memory.Read(addr, output.data(), size)); + EXPECT_EQ( + 0, memcmp(reinterpret_cast<const uint8_t*>(addr), output.data(), size)); + }; + + check_read_fails(begin - 1, 1); + check_read_fails(begin - 1, 2); + check_read_fails(end, 1); + check_read_fails(end, 2); + check_read_fails(end - 1, 2); + + check_read_succeeds(begin, 1); + check_read_succeeds(begin, 5); + check_read_succeeds(end - 1, 1); +} + +} // namespace base diff --git a/chromium/base/profiler/native_unwinder_mac.cc b/chromium/base/profiler/native_unwinder_mac.cc index 5e4da9ddefc..51717e1b5f2 100644 --- a/chromium/base/profiler/native_unwinder_mac.cc +++ b/chromium/base/profiler/native_unwinder_mac.cc @@ -9,7 +9,8 @@ #include <mach/vm_map.h> #include <sys/ptrace.h> -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "base/profiler/module_cache.h" #include "base/profiler/native_unwinder.h" #include "base/profiler/profile_builder.h" diff --git a/chromium/base/profiler/native_unwinder_win.cc b/chromium/base/profiler/native_unwinder_win.cc index e056045ed67..344b0901122 100644 --- a/chromium/base/profiler/native_unwinder_win.cc +++ b/chromium/base/profiler/native_unwinder_win.cc @@ -6,6 +6,8 @@ #include <winnt.h> +#include "base/check_op.h" +#include "base/notreached.h" #include "base/profiler/native_unwinder.h" #include "base/profiler/win32_stack_frame_unwinder.h" diff --git 
a/chromium/base/profiler/profile_builder.cc b/chromium/base/profiler/profile_builder.cc deleted file mode 100644 index 55d8dc18914..00000000000 --- a/chromium/base/profiler/profile_builder.cc +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "base/profiler/profile_builder.h" - -namespace base { - -const size_t ProfileBuilder::MAX_METADATA_COUNT; - -ProfileBuilder::MetadataItem::MetadataItem(uint64_t name_hash, - Optional<int64_t> key, - int64_t value) - : name_hash(name_hash), key(key), value(value) {} - -ProfileBuilder::MetadataItem::MetadataItem() : name_hash(0), value(0) {} - -ProfileBuilder::MetadataItem::MetadataItem(const MetadataItem& other) = default; - -ProfileBuilder::MetadataItem& ProfileBuilder::MetadataItem::MetadataItem:: -operator=(const MetadataItem& other) = default; - -} // namespace base diff --git a/chromium/base/profiler/profile_builder.h b/chromium/base/profiler/profile_builder.h index 0c82b8f8b87..0454d8c2e47 100644 --- a/chromium/base/profiler/profile_builder.h +++ b/chromium/base/profiler/profile_builder.h @@ -8,8 +8,10 @@ #include <memory> #include "base/base_export.h" +#include "base/macros.h" #include "base/optional.h" #include "base/profiler/frame.h" +#include "base/profiler/metadata_recorder.h" #include "base/profiler/module_cache.h" #include "base/time/time.h" @@ -27,39 +29,14 @@ class BASE_EXPORT ProfileBuilder { // up modules from addresses. virtual ModuleCache* GetModuleCache() = 0; - struct BASE_EXPORT MetadataItem { - MetadataItem(uint64_t name_hash, Optional<int64_t> key, int64_t value); - MetadataItem(); - - MetadataItem(const MetadataItem& other); - MetadataItem& operator=(const MetadataItem& other); - - // The hash of the metadata name, as produced by HashMetricName(). - uint64_t name_hash; - // The key if specified when setting the item. - Optional<int64_t> key; - // The value of the metadata item. - int64_t value; - }; - - static constexpr size_t MAX_METADATA_COUNT = 50; - typedef std::array<MetadataItem, MAX_METADATA_COUNT> MetadataItemArray; - - class MetadataProvider { - public: - MetadataProvider() = default; - virtual ~MetadataProvider() = default; - - virtual size_t GetItems(ProfileBuilder::MetadataItemArray* const items) = 0; - }; - // Records metadata to be associated with the current sample. To avoid // deadlock on locks taken by the suspended profiled thread, implementations // of this method must not execute any code that could take a lock, including // heap allocation or use of CHECK/DCHECK/LOG statements. Generally // implementations should simply atomically copy metadata state to be // associated with the sample. - virtual void RecordMetadata(MetadataProvider* metadata_provider) {} + virtual void RecordMetadata( + const MetadataRecorder::MetadataProvider& metadata_provider) {} // Applies the specified metadata |item| to samples collected in the range // [period_start, period_end), iff the profile already captured execution that @@ -67,9 +44,10 @@ class BASE_EXPORT ProfileBuilder { // towards samples in the middle of the period, at the expense of excluding // periods overlapping the start or end of the profile. |period_end| must be // <= TimeTicks::Now(). 
- virtual void ApplyMetadataRetrospectively(TimeTicks period_start, - TimeTicks period_end, - const MetadataItem& item) {} + virtual void ApplyMetadataRetrospectively( + TimeTicks period_start, + TimeTicks period_end, + const MetadataRecorder::Item& item) {} // Records a new set of frames. Invoked when sampling a sample completes. virtual void OnSampleCompleted(std::vector<Frame> frames, diff --git a/chromium/base/profiler/register_context.h b/chromium/base/profiler/register_context.h index 8ced8dae433..14380a52784 100644 --- a/chromium/base/profiler/register_context.h +++ b/chromium/base/profiler/register_context.h @@ -101,7 +101,7 @@ inline uintptr_t& RegisterContextFramePointer(mcontext_t* context) { } inline uintptr_t& RegisterContextInstructionPointer(mcontext_t* context) { - return AsUintPtr(&context->arm_ip); + return AsUintPtr(&context->arm_pc); } #elif defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_64_BITS) diff --git a/chromium/base/profiler/sample_metadata.cc b/chromium/base/profiler/sample_metadata.cc index ac246f1041a..8bb48d2c00b 100644 --- a/chromium/base/profiler/sample_metadata.cc +++ b/chromium/base/profiler/sample_metadata.cc @@ -10,36 +10,39 @@ namespace base { -ScopedSampleMetadata::ScopedSampleMetadata(StringPiece name, int64_t value) - : name_hash_(HashMetricName(name)) { +SampleMetadata::SampleMetadata(StringPiece name) + : name_hash_(HashMetricName(name)) {} + +void SampleMetadata::Set(int64_t value) { GetSampleMetadataRecorder()->Set(name_hash_, nullopt, value); } -ScopedSampleMetadata::ScopedSampleMetadata(StringPiece name, - int64_t key, - int64_t value) - : name_hash_(HashMetricName(name)), key_(key) { +void SampleMetadata::Set(int64_t key, int64_t value) { GetSampleMetadataRecorder()->Set(name_hash_, key, value); } -ScopedSampleMetadata::~ScopedSampleMetadata() { - GetSampleMetadataRecorder()->Remove(name_hash_, key_); +void SampleMetadata::Remove() { + GetSampleMetadataRecorder()->Remove(name_hash_, nullopt); } -void SetSampleMetadata(StringPiece name, int64_t value) { - GetSampleMetadataRecorder()->Set(HashMetricName(name), nullopt, value); +void SampleMetadata::Remove(int64_t key) { + GetSampleMetadataRecorder()->Remove(name_hash_, key); } -void SetSampleMetadata(StringPiece name, int64_t key, int64_t value) { - GetSampleMetadataRecorder()->Set(HashMetricName(name), key, value); +ScopedSampleMetadata::ScopedSampleMetadata(StringPiece name, int64_t value) + : name_hash_(HashMetricName(name)) { + GetSampleMetadataRecorder()->Set(name_hash_, nullopt, value); } -void RemoveSampleMetadata(StringPiece name) { - GetSampleMetadataRecorder()->Remove(HashMetricName(name), nullopt); +ScopedSampleMetadata::ScopedSampleMetadata(StringPiece name, + int64_t key, + int64_t value) + : name_hash_(HashMetricName(name)), key_(key) { + GetSampleMetadataRecorder()->Set(name_hash_, key, value); } -void RemoveSampleMetadata(StringPiece name, int64_t key) { - GetSampleMetadataRecorder()->Remove(HashMetricName(name), key); +ScopedSampleMetadata::~ScopedSampleMetadata() { + GetSampleMetadataRecorder()->Remove(name_hash_, key_); } // This function is friended by StackSamplingProfiler so must live directly in diff --git a/chromium/base/profiler/sample_metadata.h b/chromium/base/profiler/sample_metadata.h index a372fbafc75..4530e6ba18d 100644 --- a/chromium/base/profiler/sample_metadata.h +++ b/chromium/base/profiler/sample_metadata.h @@ -25,13 +25,15 @@ // For example: // // void DidStartLoad() { -// base::SetSampleMetadata("Renderer.IsLoading", 1); +// is_loading_metadata_.Set(1); 
// } // // void DidFinishLoad() { -// base::RemoveSampleMetadata("Renderer.IsLoading"); +// is_loading_metadata_.Remove(); // } // +// base::SampleMetadata is_loading_metadata_; +// // Alternatively, ScopedSampleMetadata can be used to ensure that the metadata // is removed correctly. // @@ -51,6 +53,49 @@ namespace base { +class BASE_EXPORT SampleMetadata { + public: + // Set the metadata value associated with |name|. + explicit SampleMetadata(StringPiece name); + + SampleMetadata(const SampleMetadata&) = default; + ~SampleMetadata() = default; + + SampleMetadata& operator=(const SampleMetadata&) = delete; + + // Set the metadata value associated with |name| in the process-global stack + // sampling profiler metadata, overwriting any previous value set for that + // |name|. + void Set(int64_t value); + + // Set the metadata value associated with the pair (|name|, |key|) in the + // process-global stack sampling profiler metadata, overwriting any previous + // value set for that (|name|, |key|) pair. This constructor allows the + // metadata to be associated with an additional user-defined key. One might + // supply a key based on the frame id, for example, to distinguish execution + // in service of scrolling between different frames. Prefer the previous + // function if no user-defined metadata is required. Note: values specified + // for a name and key are stored separately from values specified with only a + // name. + void Set(int64_t key, int64_t value); + + // Removes the metadata item with the specified name from the process-global + // stack sampling profiler metadata. + // + // If such an item doesn't exist, this has no effect. + void Remove(); + + // Removes the metadata item with the specified (|name|, |key|) pair from the + // process-global stack sampling profiler metadata. This function does not + // alter values set with the name |name| but no key. + // + // If such an item doesn't exist, this has no effect. + void Remove(int64_t key); + + private: + const uint64_t name_hash_; +}; + class BASE_EXPORT ScopedSampleMetadata { public: // Set the metadata value associated with |name|. @@ -75,36 +120,6 @@ class BASE_EXPORT ScopedSampleMetadata { Optional<int64_t> key_; }; -// Set the metadata value associated with |name| in the process-global stack -// sampling profiler metadata, overwriting any previous value set for that -// |name|. -BASE_EXPORT void SetSampleMetadata(StringPiece name, int64_t value); - -// Set the metadata value associated with the pair (|name|, |key|) in the -// process-global stack sampling profiler metadata, overwriting any previous -// value set for that (|name|, |key|) pair. This constructor allows the metadata -// to be associated with an additional user-defined key. One might supply a key -// based on the frame id, for example, to distinguish execution in service of -// scrolling between different frames. Prefer the previous function if no -// user-defined metadata is required. Note: values specified for a name and key -// are stored separately from values specified with only a name. -BASE_EXPORT void SetSampleMetadata(StringPiece name, - int64_t key, - int64_t value); - -// Removes the metadata item with the specified name from the process-global -// stack sampling profiler metadata. -// -// If such an item doesn't exist, this has no effect. -BASE_EXPORT void RemoveSampleMetadata(StringPiece name); - -// Removes the metadata item with the specified (|name|, |key|) pair from the -// process-global stack sampling profiler metadata. 
This function does not alter -// values set with the name |name| but no key. -// -// If such an item doesn't exist, this has no effect. -BASE_EXPORT void RemoveSampleMetadata(StringPiece name, int64_t key); - // Applies the specified metadata to samples already recorded between // |period_start| and |period_end| in all thread's active profiles, subject to // the condition that the profile fully encompasses the period and the profile diff --git a/chromium/base/profiler/sample_metadata_unittest.cc b/chromium/base/profiler/sample_metadata_unittest.cc index fbb9ddf2818..7c76ce4e435 100644 --- a/chromium/base/profiler/sample_metadata_unittest.cc +++ b/chromium/base/profiler/sample_metadata_unittest.cc @@ -10,79 +10,81 @@ namespace base { TEST(SampleMetadataTest, ScopedSampleMetadata) { - ProfileBuilder::MetadataItemArray items; - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + MetadataRecorder::ItemArray items; + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); { ScopedSampleMetadata m("myname", 100); ASSERT_EQ(1u, - GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); EXPECT_EQ(HashMetricName("myname"), items[0].name_hash); EXPECT_FALSE(items[0].key.has_value()); EXPECT_EQ(100, items[0].value); } - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); } TEST(SampleMetadataTest, ScopedSampleMetadataWithKey) { - ProfileBuilder::MetadataItemArray items; - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + MetadataRecorder::ItemArray items; + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); { ScopedSampleMetadata m("myname", 10, 100); ASSERT_EQ(1u, - GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); EXPECT_EQ(HashMetricName("myname"), items[0].name_hash); ASSERT_TRUE(items[0].key.has_value()); EXPECT_EQ(10, *items[0].key); EXPECT_EQ(100, items[0].value); } - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); } TEST(SampleMetadataTest, SampleMetadata) { - ProfileBuilder::MetadataItemArray items; - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); - - SetSampleMetadata("myname", 100); - ASSERT_EQ(1u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + MetadataRecorder::ItemArray items; + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); + + SampleMetadata metadata("myname"); + metadata.Set(100); + ASSERT_EQ(1u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); EXPECT_EQ(HashMetricName("myname"), items[0].name_hash); EXPECT_FALSE(items[0].key.has_value()); EXPECT_EQ(100, items[0].value); - RemoveSampleMetadata("myname"); - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + metadata.Remove(); + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); } TEST(SampleMetadataTest, 
SampleMetadataWithKey) { - ProfileBuilder::MetadataItemArray items; - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); - - SetSampleMetadata("myname", 10, 100); - ASSERT_EQ(1u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + MetadataRecorder::ItemArray items; + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); + + SampleMetadata metadata("myname"); + metadata.Set(10, 100); + ASSERT_EQ(1u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); EXPECT_EQ(HashMetricName("myname"), items[0].name_hash); ASSERT_TRUE(items[0].key.has_value()); EXPECT_EQ(10, *items[0].key); EXPECT_EQ(100, items[0].value); - RemoveSampleMetadata("myname", 10); - ASSERT_EQ(0u, GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems( - &items)); + metadata.Remove(10); + ASSERT_EQ(0u, MetadataRecorder::MetadataProvider(GetSampleMetadataRecorder()) + .GetItems(&items)); } } // namespace base diff --git a/chromium/base/profiler/stack_copier.h b/chromium/base/profiler/stack_copier.h index b1eedcb6bbf..245ba276d37 100644 --- a/chromium/base/profiler/stack_copier.h +++ b/chromium/base/profiler/stack_copier.h @@ -35,9 +35,6 @@ class BASE_EXPORT StackCopier { // deallocation, including indirectly via use of DCHECK/CHECK or other // logging statements. virtual void OnStackCopy() = 0; - - // Invoked after the stack has been copied and the target thread resumed. - virtual void OnThreadResume() = 0; }; virtual ~StackCopier(); diff --git a/chromium/base/profiler/stack_copier_signal.cc b/chromium/base/profiler/stack_copier_signal.cc index 1b601703e7c..bbef65bf520 100644 --- a/chromium/base/profiler/stack_copier_signal.cc +++ b/chromium/base/profiler/stack_copier_signal.cc @@ -229,8 +229,6 @@ bool StackCopierSignal::CopyStack(StackBuffer* stack_buffer, } } - delegate->OnThreadResume(); - const uintptr_t bottom = RegisterContextStackPointer(params.context); for (uintptr_t* reg : thread_delegate_->GetRegistersToRewrite(thread_context)) { diff --git a/chromium/base/profiler/stack_copier_signal_unittest.cc b/chromium/base/profiler/stack_copier_signal_unittest.cc index 6e2953c1764..2e321b7e9f4 100644 --- a/chromium/base/profiler/stack_copier_signal_unittest.cc +++ b/chromium/base/profiler/stack_copier_signal_unittest.cc @@ -63,25 +63,13 @@ class TargetThread : public SimpleThread { class TestStackCopierDelegate : public StackCopier::Delegate { public: void OnStackCopy() override { - // We can't EXPECT_FALSE(on_thread_resume_was_invoked_) here because that - // invocation is not reentrant. 
on_stack_copy_was_invoked_ = true; } - void OnThreadResume() override { - EXPECT_TRUE(on_stack_copy_was_invoked_); - on_thread_resume_was_invoked_ = true; - } - bool on_stack_copy_was_invoked() const { return on_stack_copy_was_invoked_; } - bool on_thread_resume_was_invoked() const { - return on_thread_resume_was_invoked_; - } - private: bool on_stack_copy_was_invoked_ = false; - bool on_thread_resume_was_invoked_ = false; }; } // namespace @@ -179,7 +167,6 @@ TEST(StackCopierSignalTest, MAYBE_CopyStackDelegateInvoked) { ASSERT_TRUE(result); EXPECT_TRUE(stack_copier_delegate.on_stack_copy_was_invoked()); - EXPECT_TRUE(stack_copier_delegate.on_thread_resume_was_invoked()); } // Limit to 32-bit Android, which is the platform we care about for this diff --git a/chromium/base/profiler/stack_copier_suspend.cc b/chromium/base/profiler/stack_copier_suspend.cc index d570c77cb01..7e320f776c9 100644 --- a/chromium/base/profiler/stack_copier_suspend.cc +++ b/chromium/base/profiler/stack_copier_suspend.cc @@ -64,8 +64,6 @@ bool StackCopierSuspend::CopyStack(StackBuffer* stack_buffer, StackBuffer::kPlatformStackAlignment, stack_buffer->buffer()); } - delegate->OnThreadResume(); - *stack_top = reinterpret_cast<uintptr_t>(stack_copy_bottom) + (top - bottom); for (uintptr_t* reg : diff --git a/chromium/base/profiler/stack_copier_suspend_unittest.cc b/chromium/base/profiler/stack_copier_suspend_unittest.cc index a81eca588f3..926afd3746c 100644 --- a/chromium/base/profiler/stack_copier_suspend_unittest.cc +++ b/chromium/base/profiler/stack_copier_suspend_unittest.cc @@ -85,25 +85,13 @@ class TestSuspendableThreadDelegate : public SuspendableThreadDelegate { class TestStackCopierDelegate : public StackCopier::Delegate { public: void OnStackCopy() override { - // We can't EXPECT_FALSE(on_thread_resume_was_invoked_) here because that - // invocation is not reentrant. on_stack_copy_was_invoked_ = true; } - void OnThreadResume() override { - EXPECT_TRUE(on_stack_copy_was_invoked_); - on_thread_resume_was_invoked_ = true; - } - bool on_stack_copy_was_invoked() const { return on_stack_copy_was_invoked_; } - bool on_thread_resume_was_invoked() const { - return on_thread_resume_was_invoked_; - } - private: bool on_stack_copy_was_invoked_ = false; - bool on_thread_resume_was_invoked_ = false; }; } // namespace @@ -218,7 +206,6 @@ TEST(StackCopierSuspendTest, CopyStackDelegateInvoked) { &register_context, &stack_copier_delegate); EXPECT_TRUE(stack_copier_delegate.on_stack_copy_was_invoked()); - EXPECT_TRUE(stack_copier_delegate.on_thread_resume_was_invoked()); } TEST(StackCopierSuspendTest, RewriteRegisters) { diff --git a/chromium/base/profiler/stack_sampler.h b/chromium/base/profiler/stack_sampler.h index 517c3a99e24..9fc91051e85 100644 --- a/chromium/base/profiler/stack_sampler.h +++ b/chromium/base/profiler/stack_sampler.h @@ -33,8 +33,8 @@ class BASE_EXPORT StackSampler { static std::unique_ptr<StackSampler> Create( SamplingProfilerThreadToken thread_token, ModuleCache* module_cache, - StackSamplerTestDelegate* test_delegate, - std::unique_ptr<Unwinder> native_unwinder = nullptr); + std::unique_ptr<Unwinder> native_unwinder, + StackSamplerTestDelegate* test_delegate); // Gets the required size of the stack buffer. static size_t GetStackBufferSize(); @@ -47,7 +47,9 @@ class BASE_EXPORT StackSampler { // thread being sampled). // Adds an auxiliary unwinder to handle additional, non-native-code unwind - // scenarios. 
When attempting to unwind, the relative priority of auxiliary + // unwinders is the inverse of the order of insertion, and the native + // unwinder is given the lowest priority virtual void AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) = 0; // Records a set of frames and returns them. diff --git a/chromium/base/profiler/stack_sampler_android.cc b/chromium/base/profiler/stack_sampler_android.cc index 80ec117fc81..8414de9e3f4 100644 --- a/chromium/base/profiler/stack_sampler_android.cc +++ b/chromium/base/profiler/stack_sampler_android.cc @@ -6,6 +6,7 @@ #include <pthread.h> +#include "base/check.h" #include "base/profiler/stack_copier_signal.h" #include "base/profiler/stack_sampler_impl.h" #include "base/profiler/thread_delegate_posix.h" @@ -17,8 +18,9 @@ namespace base { std::unique_ptr<StackSampler> StackSampler::Create( SamplingProfilerThreadToken thread_token, ModuleCache* module_cache, - StackSamplerTestDelegate* test_delegate, - std::unique_ptr<Unwinder> native_unwinder) { + std::unique_ptr<Unwinder> native_unwinder, + StackSamplerTestDelegate* test_delegate) { + DCHECK(native_unwinder); return std::make_unique<StackSamplerImpl>( std::make_unique<StackCopierSignal>( std::make_unique<ThreadDelegatePosix>(thread_token)), diff --git a/chromium/base/profiler/stack_sampler_impl.cc b/chromium/base/profiler/stack_sampler_impl.cc index 336decd9f8b..9903b33feea 100644 --- a/chromium/base/profiler/stack_sampler_impl.cc +++ b/chromium/base/profiler/stack_sampler_impl.cc @@ -6,8 +6,10 @@ #include <utility> +#include "base/check.h" #include "base/compiler_specific.h" #include "base/logging.h" +#include "base/profiler/metadata_recorder.h" #include "base/profiler/profile_builder.h" #include "base/profiler/sample_metadata.h" #include "base/profiler/stack_buffer.h" @@ -31,16 +33,13 @@ namespace { // the thread is suspended. class StackCopierDelegate : public StackCopier::Delegate { public: - StackCopierDelegate(ModuleCache* module_cache, - Unwinder* native_unwinder, - Unwinder* aux_unwinder, - ProfileBuilder* profile_builder) - : module_cache_(module_cache), - native_unwinder_(native_unwinder), - aux_unwinder_(aux_unwinder), + StackCopierDelegate( + const base::circular_deque<std::unique_ptr<Unwinder>>* unwinders, + ProfileBuilder* profile_builder, + MetadataRecorder::MetadataProvider* metadata_provider) + : unwinders_(unwinders), profile_builder_(profile_builder), - metadata_provider_( - GetSampleMetadataRecorder()->CreateMetadataProvider()) {} + metadata_provider_(metadata_provider) {} StackCopierDelegate(const StackCopierDelegate&) = delete; StackCopierDelegate& operator=(const StackCopierDelegate&) = delete; @@ -51,35 +50,16 @@ class StackCopierDelegate : public StackCopier::Delegate { // particular, it may not perform any heap allocation or deallocation, // including indirectly via use of DCHECK/CHECK or other logging statements. void OnStackCopy() override { - native_unwinder_->OnStackCapture(); - if (aux_unwinder_) - aux_unwinder_->OnStackCapture(); - -#if !defined(OS_POSIX) || defined(OS_MACOSX) - profile_builder_->RecordMetadata(metadata_provider_.get()); -#else - // TODO(https://crbug.com/1056283): Support metadata recording on POSIX - // platforms. - ALLOW_UNUSED_LOCAL(profile_builder_); -#endif - } - - void OnThreadResume() override { - // Reset this as soon as possible because it may hold a lock on the - // metadata. 
- metadata_provider_.reset(); + for (const auto& unwinder : *unwinders_) + unwinder->OnStackCapture(); - native_unwinder_->UpdateModules(module_cache_); - if (aux_unwinder_) - aux_unwinder_->UpdateModules(module_cache_); + profile_builder_->RecordMetadata(*metadata_provider_); } private: - ModuleCache* const module_cache_; - Unwinder* const native_unwinder_; - Unwinder* const aux_unwinder_; + const base::circular_deque<std::unique_ptr<Unwinder>>* unwinders_; ProfileBuilder* const profile_builder_; - std::unique_ptr<ProfileBuilder::MetadataProvider> metadata_provider_; + const MetadataRecorder::MetadataProvider* const metadata_provider_; }; } // namespace @@ -89,15 +69,17 @@ StackSamplerImpl::StackSamplerImpl(std::unique_ptr<StackCopier> stack_copier, ModuleCache* module_cache, StackSamplerTestDelegate* test_delegate) : stack_copier_(std::move(stack_copier)), - native_unwinder_(std::move(native_unwinder)), module_cache_(module_cache), - test_delegate_(test_delegate) {} + test_delegate_(test_delegate) { + DCHECK(native_unwinder); + unwinders_.push_front(std::move(native_unwinder)); +} StackSamplerImpl::~StackSamplerImpl() = default; void StackSamplerImpl::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) { - aux_unwinder_ = std::move(unwinder); - aux_unwinder_->AddInitialModules(module_cache_); + unwinder->AddInitialModules(module_cache_); + unwinders_.push_front(std::move(unwinder)); } void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer, @@ -107,19 +89,26 @@ void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer, RegisterContext thread_context; uintptr_t stack_top; TimeTicks timestamp; - StackCopierDelegate delegate(module_cache_, native_unwinder_.get(), - aux_unwinder_.get(), profile_builder); - bool success = stack_copier_->CopyStack(stack_buffer, &stack_top, &timestamp, - &thread_context, &delegate); - if (!success) - return; + { + // Make this scope as small as possible because |metadata_provider| is + // holding a lock. 
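// The scoped-provider pattern relied on here, sketched from the updated
// sample_metadata unit tests (the assumption that the provider holds the
// recorder's lock for its whole lifetime follows from the surrounding
// comments):
//   {
//     MetadataRecorder::MetadataProvider provider(GetSampleMetadataRecorder());
//     MetadataRecorder::ItemArray items;
//     size_t item_count = provider.GetItems(&items);
//     // ... read |items| while the provider (and its lock) is alive ...
//   }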
+ MetadataRecorder::MetadataProvider metadata_provider( + GetSampleMetadataRecorder()); + StackCopierDelegate delegate(&unwinders_, profile_builder, + &metadata_provider); + bool success = stack_copier_->CopyStack( + stack_buffer, &stack_top, &timestamp, &thread_context, &delegate); + if (!success) + return; + } + for (const auto& unwinder : unwinders_) + unwinder->UpdateModules(module_cache_); if (test_delegate_) test_delegate_->OnPreStackWalk(); profile_builder->OnSampleCompleted( - WalkStack(module_cache_, &thread_context, stack_top, - native_unwinder_.get(), aux_unwinder_.get()), + WalkStack(module_cache_, &thread_context, stack_top, unwinders_), timestamp); } @@ -128,18 +117,16 @@ std::vector<Frame> StackSamplerImpl::WalkStackForTesting( ModuleCache* module_cache, RegisterContext* thread_context, uintptr_t stack_top, - Unwinder* native_unwinder, - Unwinder* aux_unwinder) { - return WalkStack(module_cache, thread_context, stack_top, native_unwinder, - aux_unwinder); + const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) { + return WalkStack(module_cache, thread_context, stack_top, unwinders); } // static -std::vector<Frame> StackSamplerImpl::WalkStack(ModuleCache* module_cache, - RegisterContext* thread_context, - uintptr_t stack_top, - Unwinder* native_unwinder, - Unwinder* aux_unwinder) { +std::vector<Frame> StackSamplerImpl::WalkStack( + ModuleCache* module_cache, + RegisterContext* thread_context, + uintptr_t stack_top, + const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) { std::vector<Frame> stack; // Reserve enough memory for most stacks, to avoid repeated // allocations. Approximately 99.9% of recorded stacks are 128 frames or @@ -154,21 +141,24 @@ std::vector<Frame> StackSamplerImpl::WalkStack(ModuleCache* module_cache, size_t prior_stack_size; UnwindResult result; do { - // Choose an authoritative unwinder for the current module. Use the aux - // unwinder if it thinks it can unwind from the current frame, otherwise use - // the native unwinder. - Unwinder* unwinder = - aux_unwinder && aux_unwinder->CanUnwindFrom(stack.back()) - ? aux_unwinder - : native_unwinder; + // Choose an authoritative unwinder for the current module. Use the first + // unwinder that thinks it can unwind from the current frame. + auto unwinder = + std::find_if(unwinders.begin(), unwinders.end(), + [&stack](const std::unique_ptr<Unwinder>& unwinder) { + return unwinder->CanUnwindFrom(stack.back()); + }); + if (unwinder == unwinders.end()) + return stack; prior_stack_size = stack.size(); - result = - unwinder->TryUnwind(thread_context, stack_top, module_cache, &stack); + result = unwinder->get()->TryUnwind(thread_context, stack_top, module_cache, + &stack); // The native unwinder should be the only one that returns COMPLETED // since the stack starts in native code. 
- DCHECK(result != UnwindResult::COMPLETED || unwinder == native_unwinder); + DCHECK(result != UnwindResult::COMPLETED || + unwinder->get() == unwinders.back().get()); } while (result != UnwindResult::ABORTED && result != UnwindResult::COMPLETED && // Give up if the authoritative unwinder for the module was unable to diff --git a/chromium/base/profiler/stack_sampler_impl.h b/chromium/base/profiler/stack_sampler_impl.h index 725905e0ea9..cceee652f12 100644 --- a/chromium/base/profiler/stack_sampler_impl.h +++ b/chromium/base/profiler/stack_sampler_impl.h @@ -8,6 +8,7 @@ #include <memory> #include "base/base_export.h" +#include "base/containers/circular_deque.h" #include "base/profiler/frame.h" #include "base/profiler/register_context.h" #include "base/profiler/stack_copier.h" @@ -36,22 +37,22 @@ class BASE_EXPORT StackSamplerImpl : public StackSampler { ProfileBuilder* profile_builder) override; // Exposes the internal function for unit testing. - static std::vector<Frame> WalkStackForTesting(ModuleCache* module_cache, - RegisterContext* thread_context, - uintptr_t stack_top, - Unwinder* native_unwinder, - Unwinder* aux_unwinder); + static std::vector<Frame> WalkStackForTesting( + ModuleCache* module_cache, + RegisterContext* thread_context, + uintptr_t stack_top, + const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders); private: - static std::vector<Frame> WalkStack(ModuleCache* module_cache, - RegisterContext* thread_context, - uintptr_t stack_top, - Unwinder* native_unwinder, - Unwinder* aux_unwinder); + static std::vector<Frame> WalkStack( + ModuleCache* module_cache, + RegisterContext* thread_context, + uintptr_t stack_top, + const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders); const std::unique_ptr<StackCopier> stack_copier_; - const std::unique_ptr<Unwinder> native_unwinder_; - std::unique_ptr<Unwinder> aux_unwinder_; + // Store all unwinders in decreasing priority order. 
+ base::circular_deque<std::unique_ptr<Unwinder>> unwinders_; ModuleCache* const module_cache_; StackSamplerTestDelegate* const test_delegate_; }; diff --git a/chromium/base/profiler/stack_sampler_impl_unittest.cc b/chromium/base/profiler/stack_sampler_impl_unittest.cc index 30a40af2ee3..e3cd67ce59b 100644 --- a/chromium/base/profiler/stack_sampler_impl_unittest.cc +++ b/chromium/base/profiler/stack_sampler_impl_unittest.cc @@ -9,6 +9,7 @@ #include <numeric> #include <utility> +#include "base/memory/ptr_util.h" #include "base/profiler/module_cache.h" #include "base/profiler/profile_builder.h" #include "base/profiler/stack_buffer.h" @@ -37,7 +38,7 @@ class TestProfileBuilder : public ProfileBuilder { // ProfileBuilder ModuleCache* GetModuleCache() override { return module_cache_; } void RecordMetadata( - ProfileBuilder::MetadataProvider* metadata_provider) override {} + const MetadataRecorder::MetadataProvider& metadata_provider) override {} void OnSampleCompleted(std::vector<Frame> frames, TimeTicks sample_timestamp) override { @@ -97,7 +98,6 @@ class DelegateInvokingStackCopier : public StackCopier { RegisterContext* thread_context, Delegate* delegate) override { delegate->OnStackCopy(); - delegate->OnThreadResume(); return true; } }; @@ -268,6 +268,17 @@ class FakeTestUnwinder : public Unwinder { std::vector<Result> results_; }; +base::circular_deque<std::unique_ptr<Unwinder>> MakeUnwinderList( + std::unique_ptr<Unwinder> native_unwinder, + std::unique_ptr<Unwinder> aux_unwinder) { + base::circular_deque<std::unique_ptr<Unwinder>> unwinders; + if (aux_unwinder) + unwinders.push_back(std::move(aux_unwinder)); + if (native_unwinder) + unwinders.push_back(std::move(native_unwinder)); + return unwinders; +} + } // namespace // TODO(crbug.com/1001923): Fails on Linux MSan. 
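// How the ordering in MakeUnwinderList() plays out (a sketch, not part of the
// patch): aux unwinders come before the native unwinder, matching
// StackSamplerImpl::AddAuxUnwinder(), which push_front()s each new unwinder so
// later additions take precedence and the native unwinder is tried last:
//   auto unwinders = MakeUnwinderList(std::move(native_unwinder),
//                                     std::move(aux_unwinder));
//   // WalkStack() then picks the first unwinder whose CanUnwindFrom()
//   // returns true for the current frame.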
@@ -351,10 +362,12 @@ TEST(StackSamplerImplTest, WalkStack_Completed) { RegisterContextInstructionPointer(&thread_context) = GetTestInstructionPointer(); module_cache.AddCustomNativeModule(std::make_unique<TestModule>(1u, 1u)); - FakeTestUnwinder native_unwinder({{UnwindResult::COMPLETED, {1u}}}); + auto native_unwinder = + WrapUnique(new FakeTestUnwinder({{UnwindResult::COMPLETED, {1u}}})); std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting( - &module_cache, &thread_context, 0u, &native_unwinder, nullptr); + &module_cache, &thread_context, 0u, + MakeUnwinderList(std::move(native_unwinder), nullptr)); ASSERT_EQ(2u, stack.size()); EXPECT_EQ(1u, stack[1].instruction_pointer); @@ -366,10 +379,12 @@ TEST(StackSamplerImplTest, WalkStack_Aborted) { RegisterContextInstructionPointer(&thread_context) = GetTestInstructionPointer(); module_cache.AddCustomNativeModule(std::make_unique<TestModule>(1u, 1u)); - FakeTestUnwinder native_unwinder({{UnwindResult::ABORTED, {1u}}}); + auto native_unwinder = + WrapUnique(new FakeTestUnwinder({{UnwindResult::ABORTED, {1u}}})); std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting( - &module_cache, &thread_context, 0u, &native_unwinder, nullptr); + &module_cache, &thread_context, 0u, + MakeUnwinderList(std::move(native_unwinder), nullptr)); ASSERT_EQ(2u, stack.size()); EXPECT_EQ(1u, stack[1].instruction_pointer); @@ -380,10 +395,12 @@ TEST(StackSamplerImplTest, WalkStack_NotUnwound) { RegisterContext thread_context; RegisterContextInstructionPointer(&thread_context) = GetTestInstructionPointer(); - FakeTestUnwinder native_unwinder({{UnwindResult::UNRECOGNIZED_FRAME, {}}}); + auto native_unwinder = WrapUnique( + new FakeTestUnwinder({{UnwindResult::UNRECOGNIZED_FRAME, {}}})); std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting( - &module_cache, &thread_context, 0u, &native_unwinder, nullptr); + &module_cache, &thread_context, 0u, + MakeUnwinderList(std::move(native_unwinder), nullptr)); ASSERT_EQ(1u, stack.size()); } @@ -400,10 +417,11 @@ TEST(StackSamplerImplTest, WalkStack_AuxUnwind) { {}, ToModuleVector(std::make_unique<TestModule>( GetTestInstructionPointer(), 1u, false))); - FakeTestUnwinder aux_unwinder({{UnwindResult::ABORTED, {1u}}}); - + auto aux_unwinder = + WrapUnique(new FakeTestUnwinder({{UnwindResult::ABORTED, {1u}}})); std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting( - &module_cache, &thread_context, 0u, nullptr, &aux_unwinder); + &module_cache, &thread_context, 0u, + MakeUnwinderList(nullptr, std::move(aux_unwinder))); ASSERT_EQ(2u, stack.size()); EXPECT_EQ(GetTestInstructionPointer(), stack[0].instruction_pointer); @@ -422,12 +440,14 @@ TEST(StackSamplerImplTest, WalkStack_AuxThenNative) { // Inject a fake native module for the second frame. 
module_cache.AddCustomNativeModule(std::make_unique<TestModule>(1u, 1u)); - FakeTestUnwinder aux_unwinder( - {{UnwindResult::UNRECOGNIZED_FRAME, {1u}}, false}); - FakeTestUnwinder native_unwinder({{UnwindResult::COMPLETED, {2u}}}); + auto aux_unwinder = WrapUnique( + new FakeTestUnwinder({{UnwindResult::UNRECOGNIZED_FRAME, {1u}}, false})); + auto native_unwinder = + WrapUnique(new FakeTestUnwinder({{UnwindResult::COMPLETED, {2u}}})); std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting( - &module_cache, &thread_context, 0u, &native_unwinder, &aux_unwinder); + &module_cache, &thread_context, 0u, + MakeUnwinderList(std::move(native_unwinder), std::move(aux_unwinder))); ASSERT_EQ(3u, stack.size()); EXPECT_EQ(0u, stack[0].instruction_pointer); @@ -449,13 +469,15 @@ TEST(StackSamplerImplTest, WalkStack_NativeThenAux) { module_cache.UpdateNonNativeModules( {}, ToModuleVector(std::make_unique<TestModule>(1u, 1u, false))); - FakeTestUnwinder aux_unwinder( - {{false}, {UnwindResult::UNRECOGNIZED_FRAME, {2u}}, {false}}); - FakeTestUnwinder native_unwinder({{UnwindResult::UNRECOGNIZED_FRAME, {1u}}, - {UnwindResult::COMPLETED, {3u}}}); + auto aux_unwinder = WrapUnique(new FakeTestUnwinder( + {{false}, {UnwindResult::UNRECOGNIZED_FRAME, {2u}}, {false}})); + auto native_unwinder = + WrapUnique(new FakeTestUnwinder({{UnwindResult::UNRECOGNIZED_FRAME, {1u}}, + {UnwindResult::COMPLETED, {3u}}})); std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting( - &module_cache, &thread_context, 0u, &native_unwinder, &aux_unwinder); + &module_cache, &thread_context, 0u, + MakeUnwinderList(std::move(native_unwinder), std::move(aux_unwinder))); ASSERT_EQ(4u, stack.size()); EXPECT_EQ(0u, stack[0].instruction_pointer); diff --git a/chromium/base/profiler/stack_sampler_ios.cc b/chromium/base/profiler/stack_sampler_ios.cc index ea2c91ede72..82ad01f3028 100644 --- a/chromium/base/profiler/stack_sampler_ios.cc +++ b/chromium/base/profiler/stack_sampler_ios.cc @@ -13,8 +13,8 @@ namespace base { std::unique_ptr<StackSampler> StackSampler::Create( SamplingProfilerThreadToken thread_token, ModuleCache* module_cache, - StackSamplerTestDelegate* test_delegate, - std::unique_ptr<Unwinder> native_unwinder) { + std::unique_ptr<Unwinder> native_unwinder, + StackSamplerTestDelegate* test_delegate) { return nullptr; } diff --git a/chromium/base/profiler/stack_sampler_mac.cc b/chromium/base/profiler/stack_sampler_mac.cc index ef46c299cae..109f6425835 100644 --- a/chromium/base/profiler/stack_sampler_mac.cc +++ b/chromium/base/profiler/stack_sampler_mac.cc @@ -4,6 +4,7 @@ #include "base/profiler/stack_sampler.h" +#include "base/check.h" #include "base/profiler/native_unwinder_mac.h" #include "base/profiler/stack_copier_suspend.h" #include "base/profiler/stack_sampler_impl.h" @@ -15,8 +16,8 @@ namespace base { std::unique_ptr<StackSampler> StackSampler::Create( SamplingProfilerThreadToken thread_token, ModuleCache* module_cache, - StackSamplerTestDelegate* test_delegate, - std::unique_ptr<Unwinder> native_unwinder) { + std::unique_ptr<Unwinder> native_unwinder, + StackSamplerTestDelegate* test_delegate) { DCHECK(!native_unwinder); return std::make_unique<StackSamplerImpl>( std::make_unique<StackCopierSuspend>( diff --git a/chromium/base/profiler/stack_sampler_posix.cc b/chromium/base/profiler/stack_sampler_posix.cc index a6728419e16..44215298c63 100644 --- a/chromium/base/profiler/stack_sampler_posix.cc +++ b/chromium/base/profiler/stack_sampler_posix.cc @@ -14,8 +14,8 @@ namespace base { std::unique_ptr<StackSampler> 
StackSampler::Create( SamplingProfilerThreadToken thread_token, ModuleCache* module_cache, - StackSamplerTestDelegate* test_delegate, - std::unique_ptr<Unwinder> native_unwinder) { + std::unique_ptr<Unwinder> native_unwinder, + StackSamplerTestDelegate* test_delegate) { return nullptr; } diff --git a/chromium/base/profiler/stack_sampler_win.cc b/chromium/base/profiler/stack_sampler_win.cc index 6f6ff33900c..c19009b77e4 100644 --- a/chromium/base/profiler/stack_sampler_win.cc +++ b/chromium/base/profiler/stack_sampler_win.cc @@ -4,6 +4,7 @@ #include "base/profiler/stack_sampler.h" +#include "base/check.h" #include "base/profiler/native_unwinder_win.h" #include "base/profiler/stack_copier_suspend.h" #include "base/profiler/stack_sampler_impl.h" @@ -16,8 +17,8 @@ namespace base { std::unique_ptr<StackSampler> StackSampler::Create( SamplingProfilerThreadToken thread_token, ModuleCache* module_cache, - StackSamplerTestDelegate* test_delegate, - std::unique_ptr<Unwinder> native_unwinder) { + std::unique_ptr<Unwinder> native_unwinder, + StackSamplerTestDelegate* test_delegate) { DCHECK(!native_unwinder); #if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM64) return std::make_unique<StackSamplerImpl>( diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc index 62a0ceca452..46634d18aea 100644 --- a/chromium/base/profiler/stack_sampling_profiler.cc +++ b/chromium/base/profiler/stack_sampling_profiler.cc @@ -525,7 +525,7 @@ void StackSamplingProfiler::SamplingThread::ApplyMetadataToPastSamplesTask( Optional<int64_t> key, int64_t value) { DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId()); - ProfileBuilder::MetadataItem item(name_hash, key, value); + MetadataRecorder::Item item(name_hash, key, value); for (auto& id_collection_pair : active_collections_) { id_collection_pair.second->profile_builder->ApplyMetadataRetrospectively( period_start, period_end, item); @@ -693,10 +693,12 @@ StackSamplingProfiler::StackSamplingProfiler( SamplingProfilerThreadToken thread_token, const SamplingParams& params, std::unique_ptr<ProfileBuilder> profile_builder, + std::unique_ptr<Unwinder> native_unwinder, StackSamplerTestDelegate* test_delegate) : StackSamplingProfiler(params, std::move(profile_builder), nullptr) { - sampler_ = StackSampler::Create( - thread_token, profile_builder_->GetModuleCache(), test_delegate); + sampler_ = + StackSampler::Create(thread_token, profile_builder_->GetModuleCache(), + std::move(native_unwinder), test_delegate); } StackSamplingProfiler::StackSamplingProfiler( diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h index 846d2eaf27c..c6784b8a8f4 100644 --- a/chromium/base/profiler/stack_sampling_profiler.h +++ b/chromium/base/profiler/stack_sampling_profiler.h @@ -13,6 +13,7 @@ #include "base/optional.h" #include "base/profiler/profile_builder.h" #include "base/profiler/sampling_profiler_thread_token.h" +#include "base/profiler/unwinder.h" #include "base/synchronization/waitable_event.h" #include "base/threading/platform_thread.h" #include "base/time/time.h" @@ -83,14 +84,17 @@ class BASE_EXPORT StackSamplingProfiler { bool keep_consistent_sampling_interval = true; }; - // Creates a profiler for the specified thread. An optional |test_delegate| - // can be supplied by tests. + // Creates a profiler for the specified thread. 
|native_unwinder| is required + // on Android since the unwinder is provided outside StackSamplingProfiler, + // but must be null on other platforms. An optional |test_delegate| can be + // supplied by tests. // // The caller must ensure that this object gets destroyed before the thread // exits. StackSamplingProfiler(SamplingProfilerThreadToken thread_token, const SamplingParams& params, std::unique_ptr<ProfileBuilder> profile_builder, + std::unique_ptr<Unwinder> native_unwinder = nullptr, StackSamplerTestDelegate* test_delegate = nullptr); // Same as above function, with custom |sampler| implementation. The sampler diff --git a/chromium/base/profiler/stack_sampling_profiler_test_util.cc b/chromium/base/profiler/stack_sampling_profiler_test_util.cc index f9ad0921a39..9043d0627be 100644 --- a/chromium/base/profiler/stack_sampling_profiler_test_util.cc +++ b/chromium/base/profiler/stack_sampling_profiler_test_util.cc @@ -9,12 +9,24 @@ #include "base/callback.h" #include "base/compiler_specific.h" #include "base/location.h" +#include "base/path_service.h" +#include "base/profiler/stack_buffer.h" #include "base/profiler/stack_sampling_profiler.h" #include "base/profiler/unwinder.h" #include "base/strings/stringprintf.h" #include "base/test/bind_test_util.h" +#include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" +#if defined(OS_WIN) +// Windows doesn't provide an alloca function like Linux does. +// Fortunately, it provides _alloca, which functions identically. +#include <malloc.h> +#define alloca _alloca +#else +#include <alloca.h> +#endif + namespace base { namespace { @@ -35,7 +47,8 @@ class TestProfileBuilder : public ProfileBuilder { // ProfileBuilder: ModuleCache* GetModuleCache() override { return module_cache_; } - void RecordMetadata(MetadataProvider* metadata_provider) override {} + void RecordMetadata( + const MetadataRecorder::MetadataProvider& metadata_provider) override {} void OnSampleCompleted(std::vector<Frame> sample, TimeTicks sample_timestamp) override { @@ -55,6 +68,17 @@ class TestProfileBuilder : public ProfileBuilder { std::vector<Frame> sample_; }; +// The function to be executed by the code in the other library. +void OtherLibraryCallback(void* arg) { + OnceClosure* wait_for_sample = static_cast<OnceClosure*>(arg); + + std::move(*wait_for_sample).Run(); + + // Prevent tail call. + volatile int i = 0; + ALLOW_UNUSED_LOCAL(i); +} + } // namespace TargetThread::TargetThread(OnceClosure to_run) : to_run_(std::move(to_run)) {} @@ -134,6 +158,47 @@ CallWithPlainFunction(OnceClosure wait_for_sample) { return {start_program_counter, end_program_counter}; } +// Disable inlining for this function so that it gets its own stack frame. +NOINLINE FunctionAddressRange CallWithAlloca(OnceClosure wait_for_sample) { + const void* start_program_counter = GetProgramCounter(); + + // Volatile to force a dynamic stack allocation. + const volatile size_t alloca_size = 100; + // Use the memory via volatile writes to prevent the allocation from being + // optimized out. + volatile char* const allocation = + const_cast<volatile char*>(static_cast<char*>(alloca(alloca_size))); + for (volatile char* p = allocation; p < allocation + alloca_size; ++p) + *p = '\0'; + + if (!wait_for_sample.is_null()) + std::move(wait_for_sample).Run(); + + // Volatile to prevent a tail call to GetProgramCounter(). 
+ const void* volatile end_program_counter = GetProgramCounter(); + return {start_program_counter, end_program_counter}; +} + +// Disable inlining for this function so that it gets its own stack frame. +NOINLINE FunctionAddressRange +CallThroughOtherLibrary(NativeLibrary library, OnceClosure wait_for_sample) { + const void* start_program_counter = GetProgramCounter(); + + if (!wait_for_sample.is_null()) { + // A function whose arguments are a function accepting void*, and a void*. + using InvokeCallbackFunction = void (*)(void (*)(void*), void*); + EXPECT_TRUE(library); + InvokeCallbackFunction function = reinterpret_cast<InvokeCallbackFunction>( + GetFunctionPointerFromNativeLibrary(library, "InvokeCallbackFunction")); + EXPECT_TRUE(function); + (*function)(&OtherLibraryCallback, &wait_for_sample); + } + + // Volatile to prevent a tail call to GetProgramCounter(). + const void* volatile end_program_counter = GetProgramCounter(); + return {start_program_counter, end_program_counter}; +} + void WithTargetThread(UnwindScenario* scenario, ProfileCallback profile_callback) { UnwindScenario::SampleEvents events; @@ -246,4 +311,31 @@ void ExpectStackDoesNotContain( } } +NativeLibrary LoadOtherLibrary() { + // The lambda gymnastics works around the fact that we can't use ASSERT_* + // macros in a function returning non-null. + const auto load = [](NativeLibrary* library) { + FilePath other_library_path; + ASSERT_TRUE(PathService::Get(DIR_MODULE, &other_library_path)); + other_library_path = other_library_path.AppendASCII( + GetLoadableModuleName("base_profiler_test_support_library")); + NativeLibraryLoadError load_error; + *library = LoadNativeLibrary(other_library_path, &load_error); + ASSERT_TRUE(*library) << "error loading " << other_library_path.value() + << ": " << load_error.ToString(); + }; + + NativeLibrary library = nullptr; + load(&library); + return library; +} + +uintptr_t GetAddressInOtherLibrary(NativeLibrary library) { + EXPECT_TRUE(library); + uintptr_t address = reinterpret_cast<uintptr_t>( + GetFunctionPointerFromNativeLibrary(library, "InvokeCallbackFunction")); + EXPECT_NE(address, 0u); + return address; +} + } // namespace base diff --git a/chromium/base/profiler/stack_sampling_profiler_test_util.h b/chromium/base/profiler/stack_sampling_profiler_test_util.h index 2e0c2b9c9c1..ff2daebfe14 100644 --- a/chromium/base/profiler/stack_sampling_profiler_test_util.h +++ b/chromium/base/profiler/stack_sampling_profiler_test_util.h @@ -9,8 +9,10 @@ #include <vector> #include "base/callback.h" +#include "base/native_library.h" #include "base/profiler/frame.h" #include "base/profiler/sampling_profiler_thread_token.h" +#include "base/profiler/stack_sampler.h" #include "base/synchronization/waitable_event.h" #include "base/threading/platform_thread.h" @@ -91,6 +93,16 @@ class UnwindScenario { // any special unwinding setup, to exercise the "normal" unwind scenario. FunctionAddressRange CallWithPlainFunction(OnceClosure wait_for_sample); +// Calls into |wait_for_sample| after using alloca(), to test unwinding with a +// frame pointer. +FunctionAddressRange CallWithAlloca(OnceClosure wait_for_sample); + +// Calls into |wait_for_sample| through a function within another library, to +// test unwinding through multiple modules and scenarios involving unloaded +// modules. +FunctionAddressRange CallThroughOtherLibrary(NativeLibrary library, + OnceClosure wait_for_sample); + // The callback to perform profiling on the provided thread. 
using ProfileCallback = OnceCallback<void(SamplingProfilerThreadToken)>; @@ -122,6 +134,12 @@ void ExpectStackDoesNotContain( const std::vector<Frame>& stack, const std::vector<FunctionAddressRange>& functions); +// Loads the other library, which defines a function to be called in the +// WITH_OTHER_LIBRARY configuration. +NativeLibrary LoadOtherLibrary(); + +uintptr_t GetAddressInOtherLibrary(NativeLibrary library); + } // namespace base #endif // BASE_PROFILER_STACK_SAMPLING_PROFILER_TEST_UTIL_H_ diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc index 071874700f5..b9fc305dd91 100644 --- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc +++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc @@ -20,8 +20,6 @@ #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/metrics/metrics_hashes.h" -#include "base/native_library.h" -#include "base/path_service.h" #include "base/profiler/sample_metadata.h" #include "base/profiler/stack_sampler.h" #include "base/profiler/stack_sampling_profiler.h" @@ -66,69 +64,11 @@ using SamplingParams = StackSamplingProfiler::SamplingParams; namespace { -// Calls into |wait_for_sample| after using alloca(), to test unwinding with a -// frame pointer. -// Disable inlining for this function so that it gets its own stack frame. -NOINLINE FunctionAddressRange CallWithAlloca(OnceClosure wait_for_sample) { - const void* start_program_counter = GetProgramCounter(); - - // Volatile to force a dynamic stack allocation. - const volatile size_t alloca_size = 100; - // Use the memory via volatile writes to prevent the allocation from being - // optimized out. - volatile char* const allocation = - const_cast<volatile char*>(static_cast<char*>(alloca(alloca_size))); - for (volatile char* p = allocation; p < allocation + alloca_size; ++p) - *p = '\0'; - - if (!wait_for_sample.is_null()) - std::move(wait_for_sample).Run(); - - // Volatile to prevent a tail call to GetProgramCounter(). - const void* volatile end_program_counter = GetProgramCounter(); - return {start_program_counter, end_program_counter}; -} - -// The function to be executed by the code in the other library. -void OtherLibraryCallback(void* arg) { - OnceClosure* wait_for_sample = static_cast<OnceClosure*>(arg); - - std::move(*wait_for_sample).Run(); - - // Prevent tail call. - volatile int i = 0; - ALLOW_UNUSED_LOCAL(i); -} - -// Calls into |wait_for_sample| through a function within another library, to -// test unwinding through multiple modules and scenarios involving unloaded -// modules. -// Disable inlining for this function so that it gets its own stack frame. -NOINLINE FunctionAddressRange -CallThroughOtherLibrary(NativeLibrary library, OnceClosure wait_for_sample) { - const void* start_program_counter = GetProgramCounter(); - - if (!wait_for_sample.is_null()) { - // A function whose arguments are a function accepting void*, and a void*. - using InvokeCallbackFunction = void (*)(void (*)(void*), void*); - EXPECT_TRUE(library); - InvokeCallbackFunction function = reinterpret_cast<InvokeCallbackFunction>( - GetFunctionPointerFromNativeLibrary(library, "InvokeCallbackFunction")); - EXPECT_TRUE(function); - - (*function)(&OtherLibraryCallback, &wait_for_sample); - } - - // Volatile to prevent a tail call to GetProgramCounter(). 
- const void* volatile end_program_counter = GetProgramCounter(); - return {start_program_counter, end_program_counter}; -} - // State provided to the ProfileBuilder's ApplyMetadataRetrospectively function. struct RetrospectiveMetadata { TimeTicks period_start; TimeTicks period_end; - ProfileBuilder::MetadataItem item; + MetadataRecorder::Item item; }; // Profile consists of a set of samples and other sampling information. @@ -165,10 +105,11 @@ class TestProfileBuilder : public ProfileBuilder { // ProfileBuilder: ModuleCache* GetModuleCache() override; void RecordMetadata( - ProfileBuilder::MetadataProvider* metadata_provider) override; - void ApplyMetadataRetrospectively(TimeTicks period_start, - TimeTicks period_end, - const MetadataItem& item) override; + const MetadataRecorder::MetadataProvider& metadata_provider) override; + void ApplyMetadataRetrospectively( + TimeTicks period_start, + TimeTicks period_end, + const MetadataRecorder::Item& item) override; void OnSampleCompleted(std::vector<Frame> sample, TimeTicks sample_timestamp) override; void OnProfileCompleted(TimeDelta profile_duration, @@ -203,14 +144,14 @@ ModuleCache* TestProfileBuilder::GetModuleCache() { } void TestProfileBuilder::RecordMetadata( - ProfileBuilder::MetadataProvider* metadata_provider) { + const MetadataRecorder::MetadataProvider& metadata_provider) { ++record_metadata_count_; } void TestProfileBuilder::ApplyMetadataRetrospectively( TimeTicks period_start, TimeTicks period_end, - const MetadataItem& item) { + const MetadataRecorder::Item& item) { retrospective_metadata_.push_back( RetrospectiveMetadata{period_start, period_end, item}); } @@ -227,27 +168,6 @@ void TestProfileBuilder::OnProfileCompleted(TimeDelta profile_duration, sampling_period}); } -// Loads the other library, which defines a function to be called in the -// WITH_OTHER_LIBRARY configuration. -NativeLibrary LoadOtherLibrary() { - // The lambda gymnastics works around the fact that we can't use ASSERT_* - // macros in a function returning non-null. - const auto load = [](NativeLibrary* library) { - FilePath other_library_path; - ASSERT_TRUE(PathService::Get(DIR_MODULE, &other_library_path)); - other_library_path = other_library_path.AppendASCII( - GetLoadableModuleName("base_profiler_test_support_library")); - NativeLibraryLoadError load_error; - *library = LoadNativeLibrary(other_library_path, &load_error); - ASSERT_TRUE(*library) << "error loading " << other_library_path.value() - << ": " << load_error.ToString(); - }; - - NativeLibrary library = nullptr; - load(&library); - return library; -} - // Unloads |library| and returns when it has completed unloading. Unloading a // library is asynchronous on Windows, so simply calling UnloadNativeLibrary() // is insufficient to ensure it's been unloaded. @@ -293,6 +213,7 @@ struct TestProfilerInfo { profile = std::move(result_profile); completed.Signal(); })), + nullptr, delegate) {} // The order here is important to ensure objects being referenced don't get @@ -426,7 +347,7 @@ void TestLibraryUnload(bool wait_until_unloaded, ModuleCache* module_cache) { profile = std::move(result_profile); sampling_thread_completed.Signal(); })), - &test_delegate); + nullptr, &test_delegate); profiler.Start(); @@ -1439,7 +1360,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, BindLambdaForTesting([&profile](Profile result_profile) { profile = std::move(result_profile); })), - &post_sample_invoker); + nullptr, &post_sample_invoker); profiler.Start(); // Wait for 5 samples to be collected. 
for (int i = 0; i < 5; ++i) diff --git a/chromium/base/profiler/suspendable_thread_delegate_mac.cc b/chromium/base/profiler/suspendable_thread_delegate_mac.cc index 32fdd475cb2..a31a0caf2b1 100644 --- a/chromium/base/profiler/suspendable_thread_delegate_mac.cc +++ b/chromium/base/profiler/suspendable_thread_delegate_mac.cc @@ -8,7 +8,7 @@ #include <mach/thread_act.h> #include <pthread.h> -#include "base/logging.h" +#include "base/check.h" #include "base/mac/mach_logging.h" #include "base/profiler/profile_builder.h" diff --git a/chromium/base/profiler/suspendable_thread_delegate_win.cc b/chromium/base/profiler/suspendable_thread_delegate_win.cc index e2a30cf34be..1fbc67eb3f2 100644 --- a/chromium/base/profiler/suspendable_thread_delegate_win.cc +++ b/chromium/base/profiler/suspendable_thread_delegate_win.cc @@ -7,8 +7,8 @@ #include <windows.h> #include <winternl.h> +#include "base/check.h" #include "base/debug/alias.h" -#include "base/logging.h" #include "base/profiler/native_unwinder_win.h" #include "build/build_config.h" diff --git a/chromium/base/profiler/unwindstack_internal_android.cc b/chromium/base/profiler/unwindstack_internal_android.cc new file mode 100644 index 00000000000..92328590aa4 --- /dev/null +++ b/chromium/base/profiler/unwindstack_internal_android.cc @@ -0,0 +1,30 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/profiler/unwindstack_internal_android.h" + +#include <string.h> + +#include "base/logging.h" + +namespace base { + +UnwindStackMemoryAndroid::UnwindStackMemoryAndroid(uintptr_t stack_ptr, + uintptr_t stack_top) + : stack_ptr_(stack_ptr), stack_top_(stack_top) { + DCHECK_LE(stack_ptr_, stack_top_); +} + +UnwindStackMemoryAndroid::~UnwindStackMemoryAndroid() = default; + +size_t UnwindStackMemoryAndroid::Read(uint64_t addr, void* dst, size_t size) { + if (addr < stack_ptr_) + return 0; + if (size >= stack_top_ || addr > stack_top_ - size) + return 0; + memcpy(dst, reinterpret_cast<void*>(addr), size); + return size; +} + +} // namespace base
\ No newline at end of file diff --git a/chromium/base/profiler/unwindstack_internal_android.h b/chromium/base/profiler/unwindstack_internal_android.h new file mode 100644 index 00000000000..75058613fc7 --- /dev/null +++ b/chromium/base/profiler/unwindstack_internal_android.h @@ -0,0 +1,34 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_PROFILER_UNWINDSTACK_INTERNAL_ANDROID_H_ +#define BASE_PROFILER_UNWINDSTACK_INTERNAL_ANDROID_H_ + +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Maps.h" +#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h" + +// Avoid including this file directly in a header as it leaks headers from +// libunwindstack. In particular, it's not to be included directly or +// transitively from native_unwinder_android.h + +namespace base { + +// Implementation of unwindstack::Memory that restricts memory access to a stack +// buffer, used by NativeUnwinderAndroid. While unwinding, only memory accesses +// within the stack should be performed to restore registers. +class UnwindStackMemoryAndroid : public unwindstack::Memory { + public: + UnwindStackMemoryAndroid(uintptr_t stack_ptr, uintptr_t stack_top); + ~UnwindStackMemoryAndroid() override; + + size_t Read(uint64_t addr, void* dst, size_t size) override; + + private: + const uintptr_t stack_ptr_; + const uintptr_t stack_top_; +}; + +} // namespace base + +#endif // BASE_PROFILER_UNWINDSTACK_INTERNAL_ANDROID_H_
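For reference, a minimal sketch of how the Read() bounds checks above behave, assuming a caller that already knows the copied stack range; the function name, buffer, and values below are illustrative only and not part of the change.

#include <stdint.h>

#include "base/profiler/unwindstack_internal_android.h"

// Illustrative only: exercises the three cases handled by Read().
void SketchUnwindStackMemoryReads() {
  char stack[64] = {};
  const uintptr_t stack_ptr = reinterpret_cast<uintptr_t>(stack);
  const uintptr_t stack_top = stack_ptr + sizeof(stack);
  base::UnwindStackMemoryAndroid memory(stack_ptr, stack_top);

  uint64_t value = 0;
  // Within [stack_ptr, stack_top): copies |size| bytes and returns |size|.
  size_t copied = memory.Read(stack_ptr + 8, &value, sizeof(value));  // 8
  // Below the copied stack: rejected, returns 0.
  copied = memory.Read(stack_ptr - 8, &value, sizeof(value));  // 0
  // Would run past the top of the copied stack: rejected, returns 0.
  copied = memory.Read(stack_top - 4, &value, sizeof(value));  // 0
  (void)copied;
}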
\ No newline at end of file diff --git a/chromium/base/rand_util.cc b/chromium/base/rand_util.cc index 5881ef25e39..59f3253b409 100644 --- a/chromium/base/rand_util.cc +++ b/chromium/base/rand_util.cc @@ -11,7 +11,7 @@ #include <algorithm> #include <limits> -#include "base/logging.h" +#include "base/check_op.h" #include "base/strings/string_util.h" namespace base { diff --git a/chromium/base/rand_util_nacl.cc b/chromium/base/rand_util_nacl.cc index b26b4086551..85b92a42ef6 100644 --- a/chromium/base/rand_util_nacl.cc +++ b/chromium/base/rand_util_nacl.cc @@ -8,7 +8,7 @@ #include <stddef.h> #include <stdint.h> -#include "base/logging.h" +#include "base/check_op.h" namespace base { diff --git a/chromium/base/rand_util_posix.cc b/chromium/base/rand_util_posix.cc index 767497ca7aa..41ac6a0990e 100644 --- a/chromium/base/rand_util_posix.cc +++ b/chromium/base/rand_util_posix.cc @@ -10,8 +10,8 @@ #include <stdint.h> #include <unistd.h> +#include "base/check.h" #include "base/files/file_util.h" -#include "base/logging.h" #include "base/no_destructor.h" #include "base/posix/eintr_wrapper.h" diff --git a/chromium/base/rand_util_win.cc b/chromium/base/rand_util_win.cc index e85c216fc53..c4c3810fe5b 100644 --- a/chromium/base/rand_util_win.cc +++ b/chromium/base/rand_util_win.cc @@ -18,7 +18,7 @@ #include <algorithm> #include <limits> -#include "base/logging.h" +#include "base/check.h" namespace base { diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc b/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc index d1623e71839..8c97ff2b380 100644 --- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc +++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler_unittest.cc @@ -116,7 +116,13 @@ TEST_F(SamplingHeapProfilerTest, IntervalRandomizationSanity) { EXPECT_NEAR(1000, mean_samples, 100); // 10% tolerance. } +#if defined(OS_IOS) +// iOS devices generally have ~4GB of RAM with no swap and therefore need a +// lower allocation limit here. +const int kNumberOfAllocations = 1000; +#else const int kNumberOfAllocations = 10000; +#endif NOINLINE void Allocate1() { void* p = malloc(400); diff --git a/chromium/base/scoped_clear_last_error.h b/chromium/base/scoped_clear_last_error.h index b19f0436ae6..656dad8787a 100644 --- a/chromium/base/scoped_clear_last_error.h +++ b/chromium/base/scoped_clear_last_error.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -12,7 +12,6 @@ #include "build/build_config.h" namespace base { -namespace internal { // ScopedClearLastError stores and resets the value of thread local error codes // (errno, GetLastError()), and restores them in the destructor. 
This is useful @@ -41,7 +40,7 @@ class BASE_EXPORT ScopedClearLastError : public ScopedClearLastErrorBase { ~ScopedClearLastError(); private: - unsigned int last_system_error_; + const unsigned long last_system_error_; DISALLOW_COPY_AND_ASSIGN(ScopedClearLastError); }; @@ -52,7 +51,6 @@ using ScopedClearLastError = ScopedClearLastErrorBase; #endif // defined(OS_WIN) -} // namespace internal } // namespace base #endif // BASE_SCOPED_CLEAR_LAST_ERROR_H_ diff --git a/chromium/base/scoped_clear_last_error_unittest.cc b/chromium/base/scoped_clear_last_error_unittest.cc index 3ae6b394100..e7bae2b60a0 100644 --- a/chromium/base/scoped_clear_last_error_unittest.cc +++ b/chromium/base/scoped_clear_last_error_unittest.cc @@ -4,16 +4,15 @@ #include "base/scoped_clear_last_error.h" -#include "base/logging.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" #if defined(OS_WIN) #include <windows.h> +#include "base/logging.h" #endif // defined(OS_WIN) namespace base { -namespace internal { TEST(ScopedClearLastError, TestNoError) { errno = 1; @@ -55,5 +54,4 @@ TEST(ScopedClearLastError, TestErrorWin) { #endif // defined(OS_WIN) -} // namespace internal } // namespace base diff --git a/chromium/base/scoped_clear_last_error_win.cc b/chromium/base/scoped_clear_last_error_win.cc index cdf996359e3..18ebd6997d6 100644 --- a/chromium/base/scoped_clear_last_error_win.cc +++ b/chromium/base/scoped_clear_last_error_win.cc @@ -1,4 +1,4 @@ -// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -7,16 +7,14 @@ #include <windows.h> namespace base { -namespace internal { ScopedClearLastError::ScopedClearLastError() - : last_system_error_(::GetLastError()) { - ::SetLastError(0); + : ScopedClearLastErrorBase(), last_system_error_(GetLastError()) { + SetLastError(0); } ScopedClearLastError::~ScopedClearLastError() { - ::SetLastError(last_system_error_); + SetLastError(last_system_error_); } -} // namespace internal } // namespace base diff --git a/chromium/base/scoped_generic.h b/chromium/base/scoped_generic.h index f56a1008b70..a2050218ee6 100644 --- a/chromium/base/scoped_generic.h +++ b/chromium/base/scoped_generic.h @@ -6,6 +6,7 @@ #define BASE_SCOPED_GENERIC_H_ #include <stdlib.h> +#include <ostream> #include <algorithm> diff --git a/chromium/base/security_unittest.cc b/chromium/base/security_unittest.cc index 5f90959bdd7..947a2a6592c 100644 --- a/chromium/base/security_unittest.cc +++ b/chromium/base/security_unittest.cc @@ -16,7 +16,6 @@ #include "base/allocator/buildflags.h" #include "base/files/file_util.h" -#include "base/logging.h" #include "base/memory/free_deleter.h" #include "base/sanitizer_buildflags.h" #include "build/build_config.h" diff --git a/chromium/base/sequence_checker.h b/chromium/base/sequence_checker.h index 60ffd75a4f6..9b910eb72ae 100644 --- a/chromium/base/sequence_checker.h +++ b/chromium/base/sequence_checker.h @@ -74,7 +74,7 @@ #define SEQUENCE_CHECKER(name) base::SequenceChecker name #define DCHECK_CALLED_ON_VALID_SEQUENCE(name, ...) 
\ base::ScopedValidateSequenceChecker SEQUENCE_CHECKER_INTERNAL_UID( \ - scoped_validate_sequence_checker_)(name, ##__VA_ARGS__); + scoped_validate_sequence_checker_)(name, ##__VA_ARGS__) #define DETACH_FROM_SEQUENCE(name) (name).DetachFromSequence() #else // DCHECK_IS_ON() #if __OBJC__ && defined(OS_IOS) && !HAS_FEATURE(objc_cxx_static_assert) diff --git a/chromium/base/sequence_checker_impl.cc b/chromium/base/sequence_checker_impl.cc index ea5d21bf0df..fbf638c95f5 100644 --- a/chromium/base/sequence_checker_impl.cc +++ b/chromium/base/sequence_checker_impl.cc @@ -4,7 +4,7 @@ #include "base/sequence_checker_impl.h" -#include "base/logging.h" +#include "base/check.h" #include "base/memory/ptr_util.h" #include "base/sequence_token.h" #include "base/threading/thread_checker_impl.h" diff --git a/chromium/base/sequence_token.cc b/chromium/base/sequence_token.cc index 3636c7f2ee7..d80d7741278 100644 --- a/chromium/base/sequence_token.cc +++ b/chromium/base/sequence_token.cc @@ -5,7 +5,7 @@ #include "base/sequence_token.h" #include "base/atomic_sequence_num.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/no_destructor.h" #include "base/threading/thread_local.h" diff --git a/chromium/base/strings/safe_sprintf.cc b/chromium/base/strings/safe_sprintf.cc index 89049abd79b..3d351554ef0 100644 --- a/chromium/base/strings/safe_sprintf.cc +++ b/chromium/base/strings/safe_sprintf.cc @@ -35,7 +35,7 @@ // errno = 13 (Access denied) // In most of the anticipated use cases, that's probably the preferred // behavior. -#include "base/logging.h" +#include "base/check.h" #define DEBUG_CHECK RAW_CHECK #else #define DEBUG_CHECK(x) do { if (x) { } } while (0) diff --git a/chromium/base/strings/safe_sprintf_unittest.cc b/chromium/base/strings/safe_sprintf_unittest.cc index bb9908f9289..2c2ec4b0cb9 100644 --- a/chromium/base/strings/safe_sprintf_unittest.cc +++ b/chromium/base/strings/safe_sprintf_unittest.cc @@ -12,7 +12,7 @@ #include <limits> #include <memory> -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "build/build_config.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/strings/string16.cc b/chromium/base/strings/string16.cc index 84962e67101..64a5951d935 100644 --- a/chromium/base/strings/string16.cc +++ b/chromium/base/strings/string16.cc @@ -13,6 +13,8 @@ #elif defined(WCHAR_T_IS_UTF32) +#include <string.h> + #include <ostream> #include "base/strings/string_piece.h" diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc index 8b71b0ae113..bd0a7e435f6 100644 --- a/chromium/base/strings/string_number_conversions.cc +++ b/chromium/base/strings/string_number_conversions.cc @@ -12,7 +12,7 @@ #include <limits> #include <type_traits> -#include "base/logging.h" +#include "base/check_op.h" #include "base/no_destructor.h" #include "base/numerics/safe_math.h" #include "base/strings/string_util.h" diff --git a/chromium/base/strings/string_piece.cc b/chromium/base/strings/string_piece.cc index c789bc2a003..bd00baedee3 100644 --- a/chromium/base/strings/string_piece.cc +++ b/chromium/base/strings/string_piece.cc @@ -6,11 +6,11 @@ #include "base/strings/string_piece.h" #include <limits.h> +#include <string.h> #include <algorithm> #include <ostream> -#include "base/logging.h" #include "base/strings/utf_string_conversions.h" namespace base { diff --git a/chromium/base/strings/stringprintf.cc b/chromium/base/strings/stringprintf.cc index 738cc63bbe8..ad033b535fc 100644 
--- a/chromium/base/strings/stringprintf.cc +++ b/chromium/base/strings/stringprintf.cc @@ -9,6 +9,7 @@ #include <vector> +#include "base/logging.h" #include "base/scoped_clear_last_error.h" #include "base/stl_util.h" #include "base/strings/string_util.h" @@ -62,7 +63,7 @@ static void StringAppendVT(std::basic_string<CharT>* dst, va_list ap_copy; va_copy(ap_copy, ap); - base::internal::ScopedClearLastError last_error; + base::ScopedClearLastError last_error; int result = vsnprintfT(stack_buf, base::size(stack_buf), format, ap_copy); va_end(ap_copy); diff --git a/chromium/base/strings/sys_string_conversions_posix.cc b/chromium/base/strings/sys_string_conversions_posix.cc index ad794cae271..07ee94789f9 100644 --- a/chromium/base/strings/sys_string_conversions_posix.cc +++ b/chromium/base/strings/sys_string_conversions_posix.cc @@ -5,6 +5,7 @@ #include "base/strings/sys_string_conversions.h" #include <stddef.h> +#include <string.h> #include <wchar.h> #include "base/strings/string_piece.h" @@ -57,7 +58,6 @@ std::string SysWideToNativeMB(const std::wstring& wide) { // Handle any errors and return an empty string. case static_cast<size_t>(-1): return std::string(); - break; case 0: // We hit an embedded null byte, keep going. ++num_out_chars; @@ -85,7 +85,6 @@ std::string SysWideToNativeMB(const std::wstring& wide) { // Handle any errors and return an empty string. case static_cast<size_t>(-1): return std::string(); - break; case 0: // We hit an embedded null byte, keep going. ++j; // Output is already zeroed. @@ -114,7 +113,6 @@ std::wstring SysNativeMBToWide(StringPiece native_mb) { case static_cast<size_t>(-2): case static_cast<size_t>(-1): return std::wstring(); - break; case 0: // We hit an embedded null byte, keep going. i += 1; @@ -144,7 +142,6 @@ std::wstring SysNativeMBToWide(StringPiece native_mb) { case static_cast<size_t>(-2): case static_cast<size_t>(-1): return std::wstring(); - break; case 0: i += 1; // Skip null byte. 
break; diff --git a/chromium/base/strings/utf_offset_string_conversions.cc b/chromium/base/strings/utf_offset_string_conversions.cc index 356247e8011..3e7c7daae61 100644 --- a/chromium/base/strings/utf_offset_string_conversions.cc +++ b/chromium/base/strings/utf_offset_string_conversions.cc @@ -9,7 +9,7 @@ #include <algorithm> #include <memory> -#include "base/logging.h" +#include "base/check_op.h" #include "base/strings/string_piece.h" #include "base/strings/utf_string_conversion_utils.h" diff --git a/chromium/base/strings/utf_offset_string_conversions_unittest.cc b/chromium/base/strings/utf_offset_string_conversions_unittest.cc index 7753363110d..c1c761c03fb 100644 --- a/chromium/base/strings/utf_offset_string_conversions_unittest.cc +++ b/chromium/base/strings/utf_offset_string_conversions_unittest.cc @@ -6,7 +6,6 @@ #include <algorithm> -#include "base/logging.h" #include "base/stl_util.h" #include "base/strings/string_piece.h" #include "base/strings/utf_offset_string_conversions.h" diff --git a/chromium/base/strings/utf_string_conversions_unittest.cc b/chromium/base/strings/utf_string_conversions_unittest.cc index cd799d288ee..a79820fe05f 100644 --- a/chromium/base/strings/utf_string_conversions_unittest.cc +++ b/chromium/base/strings/utf_string_conversions_unittest.cc @@ -4,7 +4,6 @@ #include <stddef.h> -#include "base/logging.h" #include "base/stl_util.h" #include "base/strings/string_piece.h" #include "base/strings/string_util.h" diff --git a/chromium/base/sync_socket_nacl.cc b/chromium/base/sync_socket_nacl.cc index 0be491744a6..c22d7869601 100644 --- a/chromium/base/sync_socket_nacl.cc +++ b/chromium/base/sync_socket_nacl.cc @@ -10,7 +10,7 @@ #include <stdio.h> #include <sys/types.h> -#include "base/logging.h" +#include "base/notreached.h" namespace base { diff --git a/chromium/base/sync_socket_posix.cc b/chromium/base/sync_socket_posix.cc index 321a541ba91..4f30690eeae 100644 --- a/chromium/base/sync_socket_posix.cc +++ b/chromium/base/sync_socket_posix.cc @@ -18,8 +18,8 @@ #include <sys/filio.h> #endif +#include "base/check_op.h" #include "base/files/file_util.h" -#include "base/logging.h" #include "base/threading/scoped_blocking_call.h" #include "build/build_config.h" diff --git a/chromium/base/synchronization/atomic_flag.cc b/chromium/base/synchronization/atomic_flag.cc index 381dbc4f9b5..e3c5f407e26 100644 --- a/chromium/base/synchronization/atomic_flag.cc +++ b/chromium/base/synchronization/atomic_flag.cc @@ -4,7 +4,7 @@ #include "base/synchronization/atomic_flag.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { diff --git a/chromium/base/synchronization/atomic_flag_unittest.cc b/chromium/base/synchronization/atomic_flag_unittest.cc index f7daafa5026..b452b408d88 100644 --- a/chromium/base/synchronization/atomic_flag_unittest.cc +++ b/chromium/base/synchronization/atomic_flag_unittest.cc @@ -5,7 +5,7 @@ #include "base/synchronization/atomic_flag.h" #include "base/bind.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/single_thread_task_runner.h" #include "base/synchronization/waitable_event.h" #include "base/test/gtest_util.h" diff --git a/chromium/base/synchronization/lock_impl.h b/chromium/base/synchronization/lock_impl.h index 830b878e8e8..66f4b32696a 100644 --- a/chromium/base/synchronization/lock_impl.h +++ b/chromium/base/synchronization/lock_impl.h @@ -16,6 +16,8 @@ #elif defined(OS_POSIX) || defined(OS_FUCHSIA) #include <errno.h> #include <pthread.h> +#include <string.h> +#include <ostream> #endif namespace base { diff 
--git a/chromium/base/synchronization/lock_impl_posix.cc b/chromium/base/synchronization/lock_impl_posix.cc index 7571f68a9a8..bd709b42a9b 100644 --- a/chromium/base/synchronization/lock_impl_posix.cc +++ b/chromium/base/synchronization/lock_impl_posix.cc @@ -6,8 +6,8 @@ #include <string> +#include "base/check_op.h" #include "base/debug/activity_tracker.h" -#include "base/logging.h" #include "base/posix/safe_strerror.h" #include "base/strings/stringprintf.h" #include "base/synchronization/lock.h" diff --git a/chromium/base/synchronization/waitable_event_posix.cc b/chromium/base/synchronization/waitable_event_posix.cc index 1384e033ad3..ab36f8dbef1 100644 --- a/chromium/base/synchronization/waitable_event_posix.cc +++ b/chromium/base/synchronization/waitable_event_posix.cc @@ -8,8 +8,8 @@ #include <limits> #include <vector> +#include "base/check_op.h" #include "base/debug/activity_tracker.h" -#include "base/logging.h" #include "base/optional.h" #include "base/synchronization/condition_variable.h" #include "base/synchronization/lock.h" diff --git a/chromium/base/synchronization/waitable_event_watcher_posix.cc b/chromium/base/synchronization/waitable_event_watcher_posix.cc index 2b296dafd75..8e343fd3880 100644 --- a/chromium/base/synchronization/waitable_event_watcher_posix.cc +++ b/chromium/base/synchronization/waitable_event_watcher_posix.cc @@ -7,7 +7,7 @@ #include <utility> #include "base/bind.h" -#include "base/logging.h" +#include "base/check.h" #include "base/synchronization/lock.h" #include "base/threading/sequenced_task_runner_handle.h" diff --git a/chromium/base/system/sys_info.cc b/chromium/base/system/sys_info.cc index 6909668cc46..6096748c7ff 100644 --- a/chromium/base/system/sys_info.cc +++ b/chromium/base/system/sys_info.cc @@ -11,8 +11,8 @@ #include "base/callback.h" #include "base/command_line.h" #include "base/location.h" -#include "base/logging.h" #include "base/no_destructor.h" +#include "base/notreached.h" #include "base/system/sys_info_internal.h" #include "base/task/post_task.h" #include "base/task/task_traits.h" diff --git a/chromium/base/system/sys_info_chromeos.cc b/chromium/base/system/sys_info_chromeos.cc index 00a3fc85882..1d688f13491 100644 --- a/chromium/base/system/sys_info_chromeos.cc +++ b/chromium/base/system/sys_info_chromeos.cc @@ -13,6 +13,7 @@ #include "base/files/file_path.h" #include "base/files/file_util.h" #include "base/lazy_instance.h" +#include "base/notreached.h" #include "base/stl_util.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_piece.h" diff --git a/chromium/base/system/sys_info_freebsd.cc b/chromium/base/system/sys_info_freebsd.cc index 09ca9263125..52d778a89d0 100644 --- a/chromium/base/system/sys_info_freebsd.cc +++ b/chromium/base/system/sys_info_freebsd.cc @@ -8,7 +8,7 @@ #include <stdint.h> #include <sys/sysctl.h> -#include "base/logging.h" +#include "base/notreached.h" namespace base { diff --git a/chromium/base/system/sys_info_ios.mm b/chromium/base/system/sys_info_ios.mm index 1dd9dc18ad8..7936aef1847 100644 --- a/chromium/base/system/sys_info_ios.mm +++ b/chromium/base/system/sys_info_ios.mm @@ -11,8 +11,9 @@ #include <sys/sysctl.h> #include <sys/types.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/mac/scoped_mach_port.h" +#include "base/notreached.h" #include "base/process/process_metrics.h" #include "base/stl_util.h" #include "base/strings/string_util.h" diff --git a/chromium/base/system/sys_info_linux.cc b/chromium/base/system/sys_info_linux.cc index 
27457b80131..d9bfa496fde 100644 --- a/chromium/base/system/sys_info_linux.cc +++ b/chromium/base/system/sys_info_linux.cc @@ -9,9 +9,10 @@ #include <limits> +#include "base/check.h" #include "base/files/file_util.h" #include "base/lazy_instance.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/process/process_metrics.h" #include "base/strings/string_number_conversions.h" diff --git a/chromium/base/system/sys_info_mac.mm b/chromium/base/system/sys_info_mac.mm index 265d7678060..c6c772cf698 100644 --- a/chromium/base/system/sys_info_mac.mm +++ b/chromium/base/system/sys_info_mac.mm @@ -14,9 +14,10 @@ #include <sys/sysctl.h> #include <sys/types.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/mac/mac_util.h" #include "base/mac/scoped_mach_port.h" +#include "base/notreached.h" #include "base/process/process_metrics.h" #include "base/stl_util.h" #include "base/strings/string_util.h" diff --git a/chromium/base/system/sys_info_openbsd.cc b/chromium/base/system/sys_info_openbsd.cc index c6bd917e4aa..3dc98b04892 100644 --- a/chromium/base/system/sys_info_openbsd.cc +++ b/chromium/base/system/sys_info_openbsd.cc @@ -10,7 +10,7 @@ #include <sys/shm.h> #include <sys/sysctl.h> -#include "base/logging.h" +#include "base/notreached.h" #include "base/stl_util.h" namespace { diff --git a/chromium/base/system/sys_info_posix.cc b/chromium/base/system/sys_info_posix.cc index 58c0c4f63c3..33685240265 100644 --- a/chromium/base/system/sys_info_posix.cc +++ b/chromium/base/system/sys_info_posix.cc @@ -15,7 +15,7 @@ #include "base/files/file_util.h" #include "base/lazy_instance.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/strings/utf_string_conversions.h" #include "base/system/sys_info_internal.h" #include "base/threading/scoped_blocking_call.h" diff --git a/chromium/base/system/sys_info_win.cc b/chromium/base/system/sys_info_win.cc index 704abe4950e..685e593faee 100644 --- a/chromium/base/system/sys_info_win.cc +++ b/chromium/base/system/sys_info_win.cc @@ -10,8 +10,9 @@ #include <limits> +#include "base/check.h" #include "base/files/file_path.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/process/process_metrics.h" #include "base/strings/string_util.h" #include "base/strings/stringprintf.h" diff --git a/chromium/base/task/cancelable_task_tracker_unittest.cc b/chromium/base/task/cancelable_task_tracker_unittest.cc index bf1a7bab274..7bd0192d6ef 100644 --- a/chromium/base/task/cancelable_task_tracker_unittest.cc +++ b/chromium/base/task/cancelable_task_tracker_unittest.cc @@ -8,8 +8,8 @@ #include "base/bind.h" #include "base/bind_helpers.h" +#include "base/check_op.h" #include "base/location.h" -#include "base/logging.h" #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/run_loop.h" diff --git a/chromium/base/task/common/checked_lock_impl.cc b/chromium/base/task/common/checked_lock_impl.cc index c801f057e2f..698886e1615 100644 --- a/chromium/base/task/common/checked_lock_impl.cc +++ b/chromium/base/task/common/checked_lock_impl.cc @@ -8,8 +8,8 @@ #include <unordered_map> #include <vector> +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/synchronization/condition_variable.h" #include "base/task/common/checked_lock.h" #include "base/threading/platform_thread.h" diff --git a/chromium/base/task/common/operations_controller.cc b/chromium/base/task/common/operations_controller.cc 
index 7787cdf2948..14181ccf487 100644 --- a/chromium/base/task/common/operations_controller.cc +++ b/chromium/base/task/common/operations_controller.cc @@ -3,7 +3,7 @@ // found in the LICENSE file. #include "base/task/common/operations_controller.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace internal { diff --git a/chromium/base/task/common/task_annotator.cc b/chromium/base/task/common/task_annotator.cc index 0ec59979b53..1001359c712 100644 --- a/chromium/base/task/common/task_annotator.cc +++ b/chromium/base/task/common/task_annotator.cc @@ -142,6 +142,15 @@ void TaskAnnotator::RunTask(const char* trace_event_name, std::move(pending_task->task).Run(); tls->Set(previous_pending_task); + + // Stomp the markers. Otherwise they can stick around on the unused parts of + // stack and cause |task_backtrace| to be associated with an unrelated stack + // sample on this thread later in the event of a crash. Alias once again after + // these writes to make sure the compiler doesn't optimize them out (unused + // writes to a local variable). + task_backtrace.front() = nullptr; + task_backtrace.back() = nullptr; + debug::Alias(&task_backtrace); } uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const { diff --git a/chromium/base/task/common/task_annotator.h b/chromium/base/task/common/task_annotator.h index 7d8acf9c736..30d18b1cf08 100644 --- a/chromium/base/task/common/task_annotator.h +++ b/chromium/base/task/common/task_annotator.h @@ -10,6 +10,7 @@ #include <memory> #include "base/base_export.h" +#include "base/compiler_specific.h" #include "base/macros.h" #include "base/pending_task.h" @@ -45,7 +46,8 @@ class BASE_EXPORT TaskAnnotator { const char* task_queue_name); // Run a previously queued task. - void RunTask(const char* trace_event_name, PendingTask* pending_task); + void NOT_TAIL_CALLED RunTask(const char* trace_event_name, + PendingTask* pending_task); // Creates a process-wide unique ID to represent this task in trace events. // This will be mangled with a Process ID hash to reduce the likelyhood of diff --git a/chromium/base/task/job_perftest.cc b/chromium/base/task/job_perftest.cc new file mode 100644 index 00000000000..2e6782ac9f5 --- /dev/null +++ b/chromium/base/task/job_perftest.cc @@ -0,0 +1,388 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <stddef.h> +#include <atomic> +#include <utility> +#include <vector> + +#include "base/bind_helpers.h" +#include "base/containers/queue.h" +#include "base/containers/stack.h" +#include "base/optional.h" +#include "base/synchronization/lock.h" +#include "base/task/post_job.h" +#include "base/task/post_task.h" +#include "base/test/bind_test_util.h" +#include "base/test/task_environment.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "testing/perf/perf_test.h" + +namespace base { + +namespace { + +// A thread-safe data structure that generates heuristic starting points in a +// range to process items in parallel. +// Note: we could expose this atomic-binary-search-index-generator in +// //base/util if it's useful for real-world use cases. 
+class IndexGenerator { + public: + explicit IndexGenerator(size_t size) : size_(size) { + AutoLock auto_lock(lock_); + pending_indices_.push(0); + ranges_to_split_.push({0, size_}); + } + + Optional<size_t> GetNext() { + AutoLock auto_lock(lock_); + if (!pending_indices_.empty()) { + // Return any pending index first. + auto index = pending_indices_.top(); + pending_indices_.pop(); + return index; + } + if (ranges_to_split_.empty()) + return nullopt; + + // Split the oldest running range in 2 and return the middle index as + // starting point. + auto range = ranges_to_split_.front(); + ranges_to_split_.pop(); + size_t size = range.second - range.first; + size_t mid = range.first + size / 2; + // Both sides of the range are added to |ranges_to_split_| so they may be + // further split if possible. + if (mid - range.first > 1) + ranges_to_split_.push({range.first, mid}); + if (range.second - mid > 1) + ranges_to_split_.push({mid, range.second}); + return mid; + } + + void GiveBack(size_t index) { + AutoLock auto_lock(lock_); + // Add |index| to pending indices so GetNext() may return it before anything + // else. + pending_indices_.push(index); + } + + private: + base::Lock lock_; + // Pending indices that are ready to be handed out, prioritized over + // |ranges_to_split_| when non-empty. + base::stack<size_t> pending_indices_ GUARDED_BY(lock_); + // Pending [start, end] (exclusive) ranges to split and hand out indices from. + base::queue<std::pair<size_t, size_t>> ranges_to_split_ GUARDED_BY(lock_); + const size_t size_; + + DISALLOW_COPY_AND_ASSIGN(IndexGenerator); +}; + +struct WorkItem { + std::atomic_bool acquire{false}; + + bool TryAcquire() { + // memory_order_relaxed is sufficient as the WorkItem's state itself hasn't + // been modified since the beginning of its associated job. This is only + // atomically acquiring the right to work on it. + return acquire.exchange(true, std::memory_order_relaxed) == false; + } +}; + +class WorkList { + public: + WorkList(size_t num_work_items, RepeatingCallback<void(size_t)> process_item) + : num_incomplete_items_(num_work_items), + items_(num_work_items), + process_item_(std::move(process_item)) {} + + // Acquires work item at |index|. Returns true if successful, or false if the + // item was already acquired. + bool TryAcquire(size_t index) { return items_[index].TryAcquire(); } + + // Processes work item at |index|. Returns true if there are more work items + // to process, or false if all items were processed. + bool ProcessWorkItem(size_t index) { + process_item_.Run(index); + return num_incomplete_items_.fetch_sub(1, std::memory_order_relaxed) > 1; + } + + size_t NumIncompleteWorkItems() const { + // memory_order_relaxed is sufficient since this is not synchronized with + // other state. + return num_incomplete_items_.load(std::memory_order_relaxed); + } + + size_t NumWorkItems() const { return items_.size(); } + + private: + std::atomic_size_t num_incomplete_items_; + std::vector<WorkItem> items_; + RepeatingCallback<void(size_t)> process_item_; + + DISALLOW_COPY_AND_ASSIGN(WorkList); +}; + +RepeatingCallback<void(size_t)> BusyWaitCallback(TimeDelta delta) { + return base::BindRepeating( + [](base::TimeDelta duration, size_t index) { + const base::TimeTicks end_time = base::TimeTicks::Now() + duration; + while (base::TimeTicks::Now() < end_time) + ; + }, + delta); +} + +// Posts |task_count| no-op tasks every |delay|.
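To make the splitting strategy above concrete, here is a small worked example of the order in which IndexGenerator hands out starting points for eight work items; it assumes access to the class defined above (which lives in the perf test's anonymous namespace), so the snippet is illustrative rather than something the change adds.

void SketchIndexGeneratorOrder() {
  IndexGenerator generator(8);
  // The constructor seeds pending index 0 and the full range [0, 8).
  Optional<size_t> index = generator.GetNext();  // 0: the seeded pending index.
  index = generator.GetNext();                   // 4: midpoint of [0, 8).
  index = generator.GetNext();                   // 2: midpoint of [0, 4).
  index = generator.GetNext();                   // 6: midpoint of [4, 8).
  // GiveBack() re-queues an index so it is handed out before further splits.
  generator.GiveBack(6);
  index = generator.GetNext();                   // 6 again.
}

Successive workers therefore start roughly evenly spread across the item range instead of contending for the same leading items.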
+void DisruptivePostTasks(size_t task_count, TimeDelta delay) { + for (size_t i = 0; i < task_count; ++i) { + PostTask(FROM_HERE, {ThreadPool(), TaskPriority::USER_BLOCKING}, + DoNothing()); + } + PostDelayedTask(FROM_HERE, {ThreadPool(), TaskPriority::USER_BLOCKING}, + BindOnce(&DisruptivePostTasks, task_count, delay), delay); +} + +class JobPerfTest : public testing::Test { + public: + JobPerfTest() = default; + + // Process |num_work_items| items with |process_item| in parallel. Work is + // assigned by having each worker sequentially traverse all items and + // acquire unvisited ones. + void RunJobWithNaiveAssignment(const std::string& trace, + size_t num_work_items, + RepeatingCallback<void(size_t)> process_item) { + WorkList work_list(num_work_items, std::move(process_item)); + + const TimeTicks job_run_start = TimeTicks::Now(); + + WaitableEvent complete; + auto handle = PostJob(FROM_HERE, {TaskPriority::USER_VISIBLE}, + BindRepeating( + [](WorkList* work_list, WaitableEvent* complete, + JobDelegate* delegate) { + for (size_t i = 0; + i < work_list->NumWorkItems() && + work_list->NumIncompleteWorkItems() != 0 && + !delegate->ShouldYield(); + ++i) { + if (!work_list->TryAcquire(i)) + continue; + if (!work_list->ProcessWorkItem(i)) { + complete->Signal(); + return; + } + } + }, + Unretained(&work_list), Unretained(&complete)), + BindRepeating(&WorkList::NumIncompleteWorkItems, + Unretained(&work_list))); + + complete.Wait(); + handle.Join(); + const TimeDelta job_duration = TimeTicks::Now() - job_run_start; + EXPECT_EQ(0U, work_list.NumIncompleteWorkItems()); + perf_test::PrintResult( + "Work throughput", "", trace, + size_t(num_work_items / job_duration.InMilliseconds()), "tasks/ms", + true); + } + + // Process |num_work_items| items with |process_item| in parallel. Work is + // assigned dynamically by giving each new worker a different starting point + // far from other workers until all work is done. This is achieved by recursively + // splitting each range that was previously given in half. + void RunJobWithDynamicAssignment(const std::string& trace, + size_t num_work_items, + RepeatingCallback<void(size_t)> process_item, + bool disruptive_post_tasks = false) { + WorkList work_list(num_work_items, std::move(process_item)); + IndexGenerator generator(num_work_items); + + // Post extra tasks to disrupt Job execution and cause workers to yield. + if (disruptive_post_tasks) + DisruptivePostTasks(10, TimeDelta::FromMilliseconds(1)); + + const TimeTicks job_run_start = TimeTicks::Now(); + + WaitableEvent complete; + auto handle = PostJob( + FROM_HERE, {TaskPriority::USER_VISIBLE}, + BindRepeating( + [](IndexGenerator* generator, WorkList* work_list, + WaitableEvent* complete, JobDelegate* delegate) { + while (work_list->NumIncompleteWorkItems() != 0 && + !delegate->ShouldYield()) { + Optional<size_t> index = generator->GetNext(); + if (!index) + return; + for (size_t i = *index; i < work_list->NumWorkItems(); ++i) { + if (delegate->ShouldYield()) { + generator->GiveBack(i); + return; + } + if (!work_list->TryAcquire(i)) { + // If this was touched already, get a new starting point.
+ break; + } + if (!work_list->ProcessWorkItem(i)) { + complete->Signal(); + return; + } + } + } + }, + Unretained(&generator), Unretained(&work_list), + Unretained(&complete)), + BindRepeating(&WorkList::NumIncompleteWorkItems, + Unretained(&work_list))); + + complete.Wait(); + handle.Join(); + const TimeDelta job_duration = TimeTicks::Now() - job_run_start; + EXPECT_EQ(0U, work_list.NumIncompleteWorkItems()); + perf_test::PrintResult( + "Work throughput", "", trace, + size_t(num_work_items / job_duration.InMilliseconds()), "tasks/ms", + true); + } + + // Process |num_work_items| items with |process_item| in parallel. Work is + // assigned by giving each new worker a different starting point far from + // other workers; each worker then loops over all work items from there. This is achieved by + // recursively splitting each range that was previously given in half. + void RunJobWithLoopAround(const std::string& trace, + size_t num_work_items, + RepeatingCallback<void(size_t)> process_item, + bool disruptive_post_tasks = false) { + WorkList work_list(num_work_items, std::move(process_item)); + IndexGenerator generator(num_work_items); + + // Post extra tasks to disrupt Job execution and cause workers to yield. + if (disruptive_post_tasks) + DisruptivePostTasks(10, TimeDelta::FromMilliseconds(1)); + + const TimeTicks job_run_start = TimeTicks::Now(); + + WaitableEvent complete; + auto handle = + PostJob(FROM_HERE, {TaskPriority::USER_VISIBLE}, + BindRepeating( + [](IndexGenerator* generator, WorkList* work_list, + WaitableEvent* complete, JobDelegate* delegate) { + Optional<size_t> index = generator->GetNext(); + if (!index) + return; + size_t i = *index; + while (true) { + if (delegate->ShouldYield()) { + generator->GiveBack(i); + return; + } + if (!work_list->TryAcquire(i)) { + // If this was touched already, skip. + continue; + } + if (!work_list->ProcessWorkItem(i)) { + // This will cause the loop to exit if there's no work + // left. + complete->Signal(); + return; + } + ++i; + if (i == work_list->NumWorkItems()) + i = 0; + } + }, + Unretained(&generator), Unretained(&work_list), + Unretained(&complete)), + BindRepeating(&WorkList::NumIncompleteWorkItems, + Unretained(&work_list))); + + complete.Wait(); + handle.Join(); + const TimeDelta job_duration = TimeTicks::Now() - job_run_start; + EXPECT_EQ(0U, work_list.NumIncompleteWorkItems()); + perf_test::PrintResult( + "Work throughput", "", trace, + size_t(num_work_items / job_duration.InMilliseconds()), "tasks/ms", + true); + } + + private: + test::TaskEnvironment task_environment; + + DISALLOW_COPY_AND_ASSIGN(JobPerfTest); +}; + +} // namespace + +// The perftest implements the following assignment strategy: +// - Naive: See RunJobWithNaiveAssignment(). +// - Dynamic: See RunJobWithDynamicAssignment(). +// - Loop around: See RunJobWithLoopAround(). +// The following test setups exist for different strategies, although +// not every combination is performed: +// - No-op: Work items are no-op tasks. +// - No-op + disrupted: 10 disruptive tasks are posted every 1ms. +// - Busy wait: Work items are busy wait for 5us.
+// - Busy wait + disrupted + +TEST_F(JobPerfTest, NoOpWorkNaiveAssignment) { + RunJobWithNaiveAssignment("No-Op naive", 10000000, DoNothing()); +} + +TEST_F(JobPerfTest, BusyWaitNaiveAssignment) { + RepeatingCallback<void(size_t)> callback = + BusyWaitCallback(TimeDelta::FromMicroseconds(5)); + RunJobWithNaiveAssignment("BusyWait naive", 500000, std::move(callback)); +} + +TEST_F(JobPerfTest, NoOpWorkDynamicAssignment) { + RunJobWithDynamicAssignment("No-Op dynamic", 10000000, DoNothing()); +} + +TEST_F(JobPerfTest, NoOpDisruptedWorkDynamicAssignment) { + RunJobWithDynamicAssignment("No-Op dynamic disrupted", 10000000, DoNothing(), + true); +} + +TEST_F(JobPerfTest, BusyWaitWorkDynamicAssignment) { + RepeatingCallback<void(size_t)> callback = + BusyWaitCallback(TimeDelta::FromMicroseconds(5)); + RunJobWithDynamicAssignment("BusyWait dynamic", 500000, std::move(callback)); +} + +TEST_F(JobPerfTest, BusyWaitDisruptedWorkDynamicAssignment) { + RepeatingCallback<void(size_t)> callback = + BusyWaitCallback(TimeDelta::FromMicroseconds(5)); + RunJobWithDynamicAssignment("BusyWait dynamic disrupted", 500000, + std::move(callback), true); +} + +TEST_F(JobPerfTest, NoOpWorkLoopAround) { + RunJobWithLoopAround("No-Op loop around", 10000000, DoNothing()); +} + +TEST_F(JobPerfTest, NoOpDisruptedWorkLoopAround) { + RunJobWithLoopAround("No-Op loop around disrupted", 10000000, DoNothing(), + true); +} + +TEST_F(JobPerfTest, BusyWaitWorkLoopAround) { + RepeatingCallback<void(size_t)> callback = + BusyWaitCallback(TimeDelta::FromMicroseconds(5)); + RunJobWithLoopAround("BusyWait loop around", 500000, std::move(callback)); +} + +TEST_F(JobPerfTest, BusyWaitDisruptedWorkLoopAround) { + RepeatingCallback<void(size_t)> callback = + BusyWaitCallback(TimeDelta::FromMicroseconds(5)); + RunJobWithLoopAround("BusyWait loop around disrupted", 500000, + std::move(callback), true); +} + +} // namespace base diff --git a/chromium/base/task/lazy_thread_pool_task_runner.cc b/chromium/base/task/lazy_thread_pool_task_runner.cc index 47d52aee615..0b16f471cd5 100644 --- a/chromium/base/task/lazy_thread_pool_task_runner.cc +++ b/chromium/base/task/lazy_thread_pool_task_runner.cc @@ -6,8 +6,8 @@ #include <utility> +#include "base/check_op.h" #include "base/lazy_instance_helpers.h" -#include "base/logging.h" #include "base/task/thread_pool.h" namespace base { diff --git a/chromium/base/task/post_job.h b/chromium/base/task/post_job.h index fe791cff62b..1d396f1fb11 100644 --- a/chromium/base/task/post_job.h +++ b/chromium/base/task/post_job.h @@ -21,7 +21,8 @@ class PooledTaskRunnerDelegate; } // Delegate that's passed to Job's worker task, providing an entry point to -// communicate with the scheduler. +// communicate with the scheduler. To prevent deadlocks, JobDelegate methods +// should never be called while holding a user lock. class BASE_EXPORT JobDelegate { public: // A JobDelegate is instantiated for each worker task that is run. @@ -72,7 +73,8 @@ class BASE_EXPORT JobDelegate { }; // Handle returned when posting a Job. Provides methods to control execution of -// the posted Job. +// the posted Job. To prevent deadlocks, JobHandle methods should never be +// called while holding a user lock. class BASE_EXPORT JobHandle { public: JobHandle(); @@ -124,6 +126,15 @@ class BASE_EXPORT JobHandle { // base::ThreadPool. // Returns a JobHandle associated with the Job, which can be joined, canceled or // detached. 
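A minimal usage sketch of the PostJob() API described above, modeled on the JobPerfTest code earlier in this change; it is illustrative only (not part of the patch), and WorkQueue, PopItem(), Size() and ProcessItem() are hypothetical stand-ins:

  // Sketch: drive a Job until a hypothetical thread-safe |queue| is drained.
  void RunExampleJob(WorkQueue* queue) {
    base::JobHandle handle = base::PostJob(
        FROM_HERE, {base::TaskPriority::USER_VISIBLE},
        base::BindRepeating(
            [](WorkQueue* queue, base::JobDelegate* delegate) {
              // Do as much work as possible per invocation, but yield when the
              // scheduler asks so higher-priority work can run.
              while (!delegate->ShouldYield()) {
                base::Optional<size_t> index = queue->PopItem();
                if (!index)
                  return;
                ProcessItem(*index);
              }
            },
            base::Unretained(queue)),
        // Max concurrency: how many items could usefully run in parallel. This
        // may be invoked under an internal ThreadPool lock, so it must not take
        // a lock that is ever held around ThreadPool calls.
        base::BindRepeating(&WorkQueue::Size, base::Unretained(queue)));
    // Join() can run worker tasks synchronously, so no lock that the worker
    // could acquire may be held here.
    handle.Join();
  }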
+// ThreadPool APIs, including PostJob() and methods of the returned JobHandle, +// must never be called while holding a lock that could be acquired by +// |worker_task| or |max_concurrency_callback| -- that could result in a +// deadlock. This is because [1] |max_concurrency_callback| may be invoked while +// holding internal ThreadPool lock (A), hence |max_concurrency_callback| can +// only use a lock (B) if that lock is *never* held while calling back into a +// ThreadPool entry point from any thread (A=>B/B=>A deadlock) and [2] +// |worker_task| or |max_concurrency_callback| is invoked synchronously from +// JobHandle::Join() (A=>JobHandle::Join()=>A deadlock). // To avoid scheduling overhead, |worker_task| should do as much work as // possible in a loop when invoked, and JobDelegate::ShouldYield() should be // periodically invoked to conditionally exit and let the scheduler prioritize diff --git a/chromium/base/task/post_task.cc b/chromium/base/task/post_task.cc index c1d6d93c266..12218599e48 100644 --- a/chromium/base/task/post_task.cc +++ b/chromium/base/task/post_task.cc @@ -6,7 +6,7 @@ #include <utility> -#include "base/logging.h" +#include "base/check_op.h" #include "base/task/scoped_set_task_priority_for_current_thread.h" #include "base/task/task_executor.h" #include "base/task/thread_pool/thread_pool_impl.h" diff --git a/chromium/base/task/scoped_set_task_priority_for_current_thread.cc b/chromium/base/task/scoped_set_task_priority_for_current_thread.cc index 38eb0e97d4a..6020982bc6c 100644 --- a/chromium/base/task/scoped_set_task_priority_for_current_thread.cc +++ b/chromium/base/task/scoped_set_task_priority_for_current_thread.cc @@ -4,8 +4,8 @@ #include "base/task/scoped_set_task_priority_for_current_thread.h" +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/threading/thread_local.h" namespace base { diff --git a/chromium/base/task/sequence_manager/atomic_flag_set.cc b/chromium/base/task/sequence_manager/atomic_flag_set.cc index 8ccc0276a59..67a149f4459 100644 --- a/chromium/base/task/sequence_manager/atomic_flag_set.cc +++ b/chromium/base/task/sequence_manager/atomic_flag_set.cc @@ -8,7 +8,7 @@ #include "base/bits.h" #include "base/callback.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace sequence_manager { diff --git a/chromium/base/task/sequence_manager/lazily_deallocated_deque.h b/chromium/base/task/sequence_manager/lazily_deallocated_deque.h index 793f8935294..b7d1b428afa 100644 --- a/chromium/base/task/sequence_manager/lazily_deallocated_deque.h +++ b/chromium/base/task/sequence_manager/lazily_deallocated_deque.h @@ -13,6 +13,7 @@ #include "base/debug/alias.h" #include "base/gtest_prod_util.h" #include "base/logging.h" +#include "base/macros.h" #include "base/time/time.h" namespace base { diff --git a/chromium/base/task/sequence_manager/lazy_now.h b/chromium/base/task/sequence_manager/lazy_now.h index e62db65c301..874a78c214d 100644 --- a/chromium/base/task/sequence_manager/lazy_now.h +++ b/chromium/base/task/sequence_manager/lazy_now.h @@ -6,6 +6,7 @@ #define BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_ #include "base/base_export.h" +#include "base/macros.h" #include "base/optional.h" #include "base/time/time.h" diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.cc b/chromium/base/task/sequence_manager/sequence_manager_impl.cc index 70c78ba6957..3262cadd9a3 100644 --- a/chromium/base/task/sequence_manager/sequence_manager_impl.cc +++ 
b/chromium/base/task/sequence_manager/sequence_manager_impl.cc @@ -695,7 +695,7 @@ TimeDelta SequenceManagerImpl::GetDelayTillNextDelayedTask( bool SequenceManagerImpl::HasPendingHighResolutionTasks() { for (TimeDomain* time_domain : main_thread_only().time_domains) { - if (time_domain->HasPendingHighResolutionTasks()) + if (time_domain->has_pending_high_resolution_tasks()) return true; } return false; diff --git a/chromium/base/task/sequence_manager/task_queue_selector.cc b/chromium/base/task/sequence_manager/task_queue_selector.cc index ad1a803db7f..3b4f59d1efc 100644 --- a/chromium/base/task/sequence_manager/task_queue_selector.cc +++ b/chromium/base/task/sequence_manager/task_queue_selector.cc @@ -7,7 +7,7 @@ #include <utility> #include "base/bits.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/task/sequence_manager/associated_thread_id.h" #include "base/task/sequence_manager/task_queue_impl.h" #include "base/task/sequence_manager/work_queue.h" diff --git a/chromium/base/task/sequence_manager/task_time_observer.h b/chromium/base/task/sequence_manager/task_time_observer.h index 151a94119bb..0858580d21d 100644 --- a/chromium/base/task/sequence_manager/task_time_observer.h +++ b/chromium/base/task/sequence_manager/task_time_observer.h @@ -5,6 +5,7 @@ #ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_ #define BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_ +#include "base/macros.h" #include "base/time/time.h" namespace base { diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc index 994525f0626..f225da8b584 100644 --- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc +++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc @@ -5,8 +5,10 @@ #include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h" #include "base/auto_reset.h" +#include "base/feature_list.h" #include "base/memory/ptr_util.h" #include "base/message_loop/message_pump.h" +#include "base/power_monitor/power_monitor.h" #include "base/threading/hang_watcher.h" #include "base/time/tick_clock.h" #include "base/trace_event/trace_event.h" @@ -23,6 +25,12 @@ namespace sequence_manager { namespace internal { namespace { +// Activate the power management events that affect the tasks scheduling. +const Feature kUsePowerMonitorWithThreadController{ + "UsePowerMonitorWithThreadController", FEATURE_DISABLED_BY_DEFAULT}; + +bool g_use_power_monitor_with_thread_controller = false; + // Returns |next_run_time| capped at 1 day from |lazy_now|. This is used to // mitigate https://crbug.com/850450 where some platforms are unhappy with // delays > 100,000,000 seconds. In practice, a diagnosis metric showed that no @@ -139,8 +147,8 @@ void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork( main_thread_only().next_delayed_do_work = run_time; run_time = CapAtOneDay(run_time, lazy_now); - // It's very rare for PostDelayedTask to be called outside of a Do(Some)Work - // in production, so most of the time this does nothing. + // It's very rare for PostDelayedTask to be called outside of a DoWork in + // production, so most of the time this does nothing. 
if (work_deduplicator_.OnDelayedWorkRequested() == ShouldScheduleWork::kScheduleImmediate) { // |pump_| can't be null as all postTasks are cross-thread before binding, @@ -242,10 +250,8 @@ ThreadControllerWithMessagePumpImpl::DoWork() { } work_deduplicator_.OnWorkStarted(); - bool ran_task = false; // Unused. LazyNow continuation_lazy_now(time_source_); - TimeDelta delay_till_next_task = - DoWorkImpl(&continuation_lazy_now, &ran_task); + TimeDelta delay_till_next_task = DoWorkImpl(&continuation_lazy_now); // Schedule a continuation. WorkDeduplicator::NextTask next_task = delay_till_next_task.is_zero() ? WorkDeduplicator::NextTask::kIsImmediate @@ -287,8 +293,7 @@ ThreadControllerWithMessagePumpImpl::DoWork() { } TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl( - LazyNow* continuation_lazy_now, - bool* ran_task) { + LazyNow* continuation_lazy_now) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "ThreadControllerImpl::DoWork"); @@ -332,7 +337,6 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl( } #endif - *ran_task = true; main_thread_only().task_execution_allowed = true; main_thread_only().task_source->DidRunTask(); @@ -364,15 +368,25 @@ bool ThreadControllerWithMessagePumpImpl::DoIdleWork() { work_id_provider_->IncrementWorkId(); #if defined(OS_WIN) - bool need_high_res_mode = - main_thread_only().task_source->HasPendingHighResolutionTasks(); - if (main_thread_only().in_high_res_mode != need_high_res_mode) { - // On Windows we activate the high resolution timer so that the wait - // _if_ triggered by the timer happens with good resolution. If we don't - // do this the default resolution is 15ms which might not be acceptable - // for some tasks. - main_thread_only().in_high_res_mode = need_high_res_mode; - Time::ActivateHighResolutionTimer(need_high_res_mode); + if (!g_use_power_monitor_with_thread_controller || + !base::PowerMonitor::IsProcessSuspended()) { + // Avoid calling Time::ActivateHighResolutionTimer() between + // suspend/resume as the system hangs if we do (crbug.com/1074028). + // OnResume() will generate a task on this thread per the + // ThreadControllerPowerMonitor observer and DoIdleWork() will thus get + // another chance to set the right high-resolution-timer-state before + // going to sleep after resume. + + const bool need_high_res_mode = + main_thread_only().task_source->HasPendingHighResolutionTasks(); + if (main_thread_only().in_high_res_mode != need_high_res_mode) { + // On Windows we activate the high resolution timer so that the wait + // _if_ triggered by the timer happens with good resolution. If we don't + // do this the default resolution is 15ms which might not be acceptable + // for some tasks. + main_thread_only().in_high_res_mode = need_high_res_mode; + Time::ActivateHighResolutionTimer(need_high_res_mode); + } } #endif // defined(OS_WIN) @@ -446,7 +460,7 @@ void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed, void ThreadControllerWithMessagePumpImpl::OnBeginNestedRunLoop() { // We don't need to ScheduleWork here! That's because the call to pump_->Run() // above, which is always called for RunLoop().Run(), guarantees a call to - // Do(Some)Work on all platforms. + // DoWork on all platforms. 
if (main_thread_only().nesting_observer) main_thread_only().nesting_observer->OnBeginNestedRunLoop(); } @@ -476,7 +490,7 @@ void ThreadControllerWithMessagePumpImpl::SetTaskExecutionAllowed( if (allowed) { // We need to schedule work unconditionally because we might be about to // enter an OS level nested message loop. Unlike a RunLoop().Run() we don't - // get a call to Do(Some)Work on entering for free. + // get a call to DoWork on entering for free. work_deduplicator_.OnWorkRequested(); // Set the pending DoWork flag. pump_->ScheduleWork(); } else { @@ -518,5 +532,11 @@ bool ThreadControllerWithMessagePumpImpl::ShouldQuitRunLoopWhenIdle() { } } // namespace internal + +void PostFieldTrialInitialization() { + internal::g_use_power_monitor_with_thread_controller = + FeatureList::IsEnabled(internal::kUsePowerMonitorWithThreadController); +} + } // namespace sequence_manager } // namespace base diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h index 1a1d52150bc..0dbf946f9ea 100644 --- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h +++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h @@ -100,7 +100,7 @@ class BASE_EXPORT ThreadControllerWithMessagePumpImpl // Returns the delay till the next task. If there's no delay TimeDelta::Max() // will be returned. - TimeDelta DoWorkImpl(LazyNow* continuation_lazy_now, bool* ran_task); + TimeDelta DoWorkImpl(LazyNow* continuation_lazy_now); void InitializeThreadTaskRunnerHandle() EXCLUSIVE_LOCKS_REQUIRED(task_runner_lock_); @@ -187,6 +187,11 @@ class BASE_EXPORT ThreadControllerWithMessagePumpImpl }; } // namespace internal + +// Initialize ThreadController features. Called after FeatureList is available +// when the process is still single-threaded. +BASE_EXPORT void PostFieldTrialInitialization(); + } // namespace sequence_manager } // namespace base diff --git a/chromium/base/task/sequence_manager/time_domain.cc b/chromium/base/task/sequence_manager/time_domain.cc index b34f32fc4c7..1df52f3d131 100644 --- a/chromium/base/task/sequence_manager/time_domain.cc +++ b/chromium/base/task/sequence_manager/time_domain.cc @@ -156,13 +156,5 @@ void TimeDomain::AsValueIntoInternal(trace_event::TracedValue* state) const { // Can be overriden to trace some additional state. } -bool TimeDomain::HasPendingHighResolutionTasks() const { - return pending_high_res_wake_up_count_; -} - -bool TimeDomain::Empty() const { - return delayed_wake_up_queue_.empty(); -} - } // namespace sequence_manager } // namespace base diff --git a/chromium/base/task/sequence_manager/time_domain.h b/chromium/base/task/sequence_manager/time_domain.h index 49b626dd501..ddbbc54bd96 100644 --- a/chromium/base/task/sequence_manager/time_domain.h +++ b/chromium/base/task/sequence_manager/time_domain.h @@ -57,10 +57,13 @@ class BASE_EXPORT TimeDomain { virtual Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) = 0; void AsValueInto(trace_event::TracedValue* state) const; - bool HasPendingHighResolutionTasks() const; + + bool has_pending_high_resolution_tasks() const { + return pending_high_res_wake_up_count_; + } // Returns true if there are no pending delayed tasks. - bool Empty() const; + bool empty() const { return delayed_wake_up_queue_.empty(); } // This is the signal that virtual time should step forward. 
If // RunLoop::QuitWhenIdle has been called then |quit_when_idle_requested| will diff --git a/chromium/base/task/sequence_manager/time_domain_unittest.cc b/chromium/base/task/sequence_manager/time_domain_unittest.cc index 23a5cbacfd2..2096520fc16 100644 --- a/chromium/base/task/sequence_manager/time_domain_unittest.cc +++ b/chromium/base/task/sequence_manager/time_domain_unittest.cc @@ -101,14 +101,14 @@ class TimeDomainTest : public testing::Test { TEST_F(TimeDomainTest, ScheduleWakeUpForQueue) { TimeDelta delay = TimeDelta::FromMilliseconds(10); TimeTicks delayed_runtime = time_domain_->Now() + delay; - EXPECT_TRUE(time_domain_->Empty()); + EXPECT_TRUE(time_domain_->empty()); EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, delayed_runtime)); TimeTicks now = time_domain_->Now(); LazyNow lazy_now(now); task_queue_->SetDelayedWakeUpForTesting( internal::DelayedWakeUp{now + delay, 0}); - EXPECT_FALSE(time_domain_->Empty()); + EXPECT_FALSE(time_domain_->empty()); EXPECT_EQ(delayed_runtime, time_domain_->NextScheduledRunTime()); EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue()); @@ -199,7 +199,7 @@ TEST_F(TimeDomainTest, UnregisterQueue) { std::unique_ptr<TaskQueueImplForTest> task_queue2 = std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(), TaskQueue::Spec("test")); - EXPECT_TRUE(time_domain_->Empty()); + EXPECT_TRUE(time_domain_->empty()); TimeTicks now = time_domain_->Now(); LazyNow lazy_now(now); @@ -208,7 +208,7 @@ TEST_F(TimeDomainTest, UnregisterQueue) { task_queue_->SetDelayedWakeUpForTesting(internal::DelayedWakeUp{wake_up1, 0}); TimeTicks wake_up2 = now + TimeDelta::FromMilliseconds(100); task_queue2->SetDelayedWakeUpForTesting(internal::DelayedWakeUp{wake_up2, 0}); - EXPECT_FALSE(time_domain_->Empty()); + EXPECT_FALSE(time_domain_->empty()); EXPECT_EQ(task_queue_.get(), time_domain_->NextScheduledTaskQueue()); @@ -222,7 +222,7 @@ TEST_F(TimeDomainTest, UnregisterQueue) { task_queue_->UnregisterTaskQueue(); task_queue_ = nullptr; - EXPECT_FALSE(time_domain_->Empty()); + EXPECT_FALSE(time_domain_->empty()); testing::Mock::VerifyAndClearExpectations(time_domain_.get()); EXPECT_CALL(*time_domain_.get(), SetNextDelayedDoWork(_, TimeTicks::Max())) @@ -233,7 +233,7 @@ TEST_F(TimeDomainTest, UnregisterQueue) { task_queue2->UnregisterTaskQueue(); task_queue2 = nullptr; - EXPECT_TRUE(time_domain_->Empty()); + EXPECT_TRUE(time_domain_->empty()); } TEST_F(TimeDomainTest, MoveReadyDelayedTasksToWorkQueues) { @@ -345,48 +345,48 @@ TEST_F(TimeDomainTest, HighResolutionWakeUps) { TaskQueueImplForTest q2(nullptr, time_domain_.get(), TaskQueue::Spec("test")); // Add two high resolution wake-ups. - EXPECT_FALSE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_FALSE(time_domain_->has_pending_high_resolution_tasks()); time_domain_->SetNextWakeUpForQueue( &q1, internal::DelayedWakeUp{run_time1, 0}, internal::WakeUpResolution::kHigh, &lazy_now); - EXPECT_TRUE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_TRUE(time_domain_->has_pending_high_resolution_tasks()); time_domain_->SetNextWakeUpForQueue( &q2, internal::DelayedWakeUp{run_time2, 0}, internal::WakeUpResolution::kHigh, &lazy_now); - EXPECT_TRUE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_TRUE(time_domain_->has_pending_high_resolution_tasks()); // Remove one of the wake-ups. 
time_domain_->SetNextWakeUpForQueue( &q1, nullopt, internal::WakeUpResolution::kLow, &lazy_now); - EXPECT_TRUE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_TRUE(time_domain_->has_pending_high_resolution_tasks()); // Remove the second one too. time_domain_->SetNextWakeUpForQueue( &q2, nullopt, internal::WakeUpResolution::kLow, &lazy_now); - EXPECT_FALSE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_FALSE(time_domain_->has_pending_high_resolution_tasks()); // Change a low resolution wake-up to a high resolution one. time_domain_->SetNextWakeUpForQueue( &q1, internal::DelayedWakeUp{run_time1, 0}, internal::WakeUpResolution::kLow, &lazy_now); - EXPECT_FALSE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_FALSE(time_domain_->has_pending_high_resolution_tasks()); time_domain_->SetNextWakeUpForQueue( &q1, internal::DelayedWakeUp{run_time1, 0}, internal::WakeUpResolution::kHigh, &lazy_now); - EXPECT_TRUE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_TRUE(time_domain_->has_pending_high_resolution_tasks()); // Move a high resolution wake-up in time. time_domain_->SetNextWakeUpForQueue( &q1, internal::DelayedWakeUp{run_time2, 0}, internal::WakeUpResolution::kHigh, &lazy_now); - EXPECT_TRUE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_TRUE(time_domain_->has_pending_high_resolution_tasks()); // Cancel the wake-up twice. time_domain_->SetNextWakeUpForQueue( &q1, nullopt, internal::WakeUpResolution::kLow, &lazy_now); time_domain_->SetNextWakeUpForQueue( &q1, nullopt, internal::WakeUpResolution::kLow, &lazy_now); - EXPECT_FALSE(time_domain_->HasPendingHighResolutionTasks()); + EXPECT_FALSE(time_domain_->has_pending_high_resolution_tasks()); // Tidy up. q1.UnregisterTaskQueue(); diff --git a/chromium/base/task/sequence_manager/work_deduplicator.cc b/chromium/base/task/sequence_manager/work_deduplicator.cc index 60cfdbe60ef..f72eb7c7d88 100644 --- a/chromium/base/task/sequence_manager/work_deduplicator.cc +++ b/chromium/base/task/sequence_manager/work_deduplicator.cc @@ -5,7 +5,7 @@ #include "base/task/sequence_manager/work_deduplicator.h" #include <utility> -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace sequence_manager { @@ -59,36 +59,17 @@ WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::DidCheckForMoreWork( NextTask next_task) { DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker); DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag); - last_work_check_result_ = ShouldScheduleWork::kScheduleImmediate; if (next_task == NextTask::kIsImmediate) { state_.store(State::kDoWorkPending); - } else { - // Another thread may have set kPendingDoWorkFlag between - // WillCheckForMoreWork() and here, if so we should return - // ShouldScheduleWork::kScheduleImmediate. Otherwise we don't need to - // schedule an immediate continuation. 
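For context, a rough sketch of the single DoWork iteration that these WorkDeduplicator calls mediate, pieced together from the ThreadControllerWithMessagePumpImpl and unit-test usage visible in this change; it is illustrative only (not part of the patch), and RunOneTask() and HasMoreImmediateWork() are hypothetical:

  // Sketch only.
  using base::sequence_manager::internal::WorkDeduplicator;

  void DoWorkOnce(WorkDeduplicator* dedup, base::MessagePump* pump) {
    dedup->OnWorkStarted();
    RunOneTask();  // Hypothetical: run one task from the task source.
    dedup->WillCheckForMoreWork();
    const WorkDeduplicator::NextTask next =
        HasMoreImmediateWork() ? WorkDeduplicator::NextTask::kIsImmediate
                               : WorkDeduplicator::NextTask::kIsDelayed;
    // DidCheckForMoreWork() atomically folds in any OnWorkRequested() that
    // raced with the check above, so at most one continuation is scheduled.
    if (dedup->DidCheckForMoreWork(next) ==
        WorkDeduplicator::ShouldScheduleWork::kScheduleImmediate) {
      pump->ScheduleWork();
    }
  }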
- if (!(state_.fetch_and(~kInDoWorkFlag) & kPendingDoWorkFlag)) - last_work_check_result_ = ShouldScheduleWork::kNotNeeded; + return ShouldScheduleWork::kScheduleImmediate; } - return last_work_check_result_; -} - -void WorkDeduplicator::OnDelayedWorkStarted() { - DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker); - OnWorkStarted(); -} - -WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnDelayedWorkEnded( - NextTask next_task) { - DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker); - ShouldScheduleWork prev_last_work_check_result = last_work_check_result_; - WorkDeduplicator::ShouldScheduleWork should_schedule_work = - DidCheckForMoreWork(next_task); - if (prev_last_work_check_result == ShouldScheduleWork::kScheduleImmediate) { - prev_last_work_check_result = ShouldScheduleWork::kNotNeeded; - should_schedule_work = ShouldScheduleWork::kNotNeeded; - } - return should_schedule_work; + // If |next_task| is not immediate, there's still a possibility that + // OnWorkRequested() was invoked racily from another thread just after this + // thread determined that the next task wasn't immediate. In that case, that + // other thread relies on us to return kScheduleImmediate. + return (state_.fetch_and(~kInDoWorkFlag) & kPendingDoWorkFlag) + ? ShouldScheduleWork::kScheduleImmediate + : ShouldScheduleWork::kNotNeeded; } } // namespace internal diff --git a/chromium/base/task/sequence_manager/work_deduplicator.h b/chromium/base/task/sequence_manager/work_deduplicator.h index e810cadc30d..5a21f3a696b 100644 --- a/chromium/base/task/sequence_manager/work_deduplicator.h +++ b/chromium/base/task/sequence_manager/work_deduplicator.h @@ -72,18 +72,18 @@ class BASE_EXPORT WorkDeduplicator { // B: return ShouldScheduleWork::kNotNeeded because we're in a DoWork. // C: return ShouldScheduleWork::kNotNeeded because we're in a DoWork, however // DidCheckForMoreWork should subsequently return - // ShouldScheduleWork::kSchedule. - // D: If DidCheckForMoreWork(kIsImmediate::kIsImmediate) was called then it + // ShouldScheduleWork::kScheduleImmediate. + // D: If DidCheckForMoreWork(NextTask::kIsImmediate) was called then it // should ShouldScheduleWork::kNotNeeded because there's a pending DoWork. - // Otherwise it should return ShouldScheduleWork::kSchedule, but a + // Otherwise it should return ShouldScheduleWork::kScheduleImmediate, but a // subsequent call to OnWorkRequested should return // ShouldScheduleWork::kNotNeeded because there's now a pending DoWork. ShouldScheduleWork OnWorkRequested(); - // Returns ShouldScheduleWork::kSchedule if it's OK to schedule a + // Returns ShouldScheduleWork::kScheduleImmediate if it's OK to schedule a // DoDelayedWork without risk of redundancy. Deduplication of delayed work is // assumed to have been done by the caller, the purpose of this method it to - // check if there's a pending Do(Some)Work which would schedule a delayed + // check if there's a pending DoWork which would schedule a delayed // continuation as needed. // // Returns ShouldScheduleWork::kNotNeeded if: @@ -112,22 +112,13 @@ class BASE_EXPORT WorkDeduplicator { kIsDelayed, }; - // Marks us as exiting DoWork. Returns ShouldScheduleWork::kSchedule if an - // immediate DoWork continuation should be posted. This method takes into - // account any OnWorkRequested's called between BeforeComputeDelayTillNextTask - // and here. Must be called on the associated thread. + // Marks us as exiting DoWork. 
Returns ShouldScheduleWork::kScheduleImmediate + // if an immediate DoWork continuation should be posted. This method + // atomically takes into account any OnWorkRequested's called between + // gathering information about |next_task| and this call. Must be called on + // the associated thread. ShouldScheduleWork DidCheckForMoreWork(NextTask next_task); - // For ThreadControllerWithMessagePumpImpl. The MessagePump calls DoWork and - // DoDelayed work sequentially. If DoWork returns - // ShouldScheduleWork::kSchedule, the pump will call ScheduleWork. We remember - // if DoWork will be scheduled so we don't accidentally call it twice from - // DoDelayedWork. Must be called on the associated thread. - // TODO(alexclarke): Remove these when the DoWork/DoDelayed work merger - // happens. - void OnDelayedWorkStarted(); - ShouldScheduleWork OnDelayedWorkEnded(NextTask next_task); - private: enum Flags { kInDoWorkFlag = 1 << 0, @@ -145,9 +136,6 @@ class BASE_EXPORT WorkDeduplicator { std::atomic<int> state_{State::kUnbound}; scoped_refptr<AssociatedThreadId> associated_thread_; - - // TODO(alexclarke): Remove when the DoWork/DoDelayed work merger happens. - ShouldScheduleWork last_work_check_result_ = ShouldScheduleWork::kNotNeeded; }; } // namespace internal diff --git a/chromium/base/task/sequence_manager/work_deduplicator_unittest.cc b/chromium/base/task/sequence_manager/work_deduplicator_unittest.cc index f11dae96694..435e7bbbdce 100644 --- a/chromium/base/task/sequence_manager/work_deduplicator_unittest.cc +++ b/chromium/base/task/sequence_manager/work_deduplicator_unittest.cc @@ -28,7 +28,7 @@ TEST(WorkDeduplicatorTest, OnWorkRequestedUnBound) { work_deduplicator.BindToCurrentThread()); } -TEST(WorkDeduplicatorTest, OnWorkRequestedBeforeDoWork) { +TEST(WorkDeduplicatorTest, OnWorkRequestedOnWorkStarted) { WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); work_deduplicator.BindToCurrentThread(); @@ -178,7 +178,8 @@ TEST(WorkDeduplicatorTest, OnDelayedWorkRequestedInDoWork) { work_deduplicator.DidCheckForMoreWork(NextTask::kIsImmediate); } -TEST(WorkDeduplicatorTest, OnDelayedWorkRequestedAfterDoWorkWithMoreWork) { +TEST(WorkDeduplicatorTest, + OnDelayedWorkRequestedDidCheckForMoreWorkWithMoreWork) { WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); work_deduplicator.BindToCurrentThread(); @@ -190,7 +191,8 @@ TEST(WorkDeduplicatorTest, OnDelayedWorkRequestedAfterDoWorkWithMoreWork) { work_deduplicator.OnDelayedWorkRequested()); } -TEST(WorkDeduplicatorTest, OnDelayedWorkRequestedAfterDoWorkWithNoMoreWork) { +TEST(WorkDeduplicatorTest, + OnDelayedWorkRequestedDidCheckForMoreWorkWithNoMoreWork) { WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); work_deduplicator.BindToCurrentThread(); @@ -212,63 +214,6 @@ TEST(WorkDeduplicatorTest, OnDelayedWorkRequestedWithDoWorkPending) { work_deduplicator.OnDelayedWorkRequested()); } -TEST(WorkDeduplicatorTest, DoDelayedWorkWithNoMoreWorkAfterDoWorkWithMoreWork) { - WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); - work_deduplicator.BindToCurrentThread(); - - work_deduplicator.OnWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - work_deduplicator.DidCheckForMoreWork(NextTask::kIsImmediate); - - work_deduplicator.OnDelayedWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - EXPECT_EQ(ShouldScheduleWork::kNotNeeded, - work_deduplicator.OnDelayedWorkEnded(NextTask::kIsDelayed)); -} - -TEST(WorkDeduplicatorTest, 
DoDelayedWorkWithMoreWorkAfterDoWorkWithMoreWork) { - WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); - work_deduplicator.BindToCurrentThread(); - - work_deduplicator.OnWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - work_deduplicator.DidCheckForMoreWork(NextTask::kIsImmediate); - - work_deduplicator.OnDelayedWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - EXPECT_EQ(ShouldScheduleWork::kNotNeeded, - work_deduplicator.OnDelayedWorkEnded(NextTask::kIsImmediate)); -} - -TEST(WorkDeduplicatorTest, DoDelayedWorkWithMoreWorkAfterDoWorkWithNoMoreWork) { - WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); - work_deduplicator.BindToCurrentThread(); - - work_deduplicator.OnWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - work_deduplicator.DidCheckForMoreWork(NextTask::kIsDelayed); - - work_deduplicator.OnDelayedWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - EXPECT_EQ(ShouldScheduleWork::kScheduleImmediate, - work_deduplicator.OnDelayedWorkEnded(NextTask::kIsImmediate)); -} - -TEST(WorkDeduplicatorTest, - DoDelayedWorkWithNoMoreWorkAfterDoWorkWithNoMoreWork) { - WorkDeduplicator work_deduplicator(AssociatedThreadId::CreateBound()); - work_deduplicator.BindToCurrentThread(); - - work_deduplicator.OnWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - work_deduplicator.DidCheckForMoreWork(NextTask::kIsDelayed); - - work_deduplicator.OnDelayedWorkStarted(); - work_deduplicator.WillCheckForMoreWork(); - EXPECT_EQ(ShouldScheduleWork::kNotNeeded, - work_deduplicator.OnDelayedWorkEnded(NextTask::kIsDelayed)); -} - } // namespace internal } // namespace sequence_manager } // namespace base diff --git a/chromium/base/task/sequence_manager/work_queue.cc b/chromium/base/task/sequence_manager/work_queue.cc index 2dfd04da230..836f00034b9 100644 --- a/chromium/base/task/sequence_manager/work_queue.cc +++ b/chromium/base/task/sequence_manager/work_queue.cc @@ -4,8 +4,10 @@ #include "base/task/sequence_manager/work_queue.h" +#include "base/debug/alias.h" #include "base/task/sequence_manager/sequence_manager_impl.h" #include "base/task/sequence_manager/work_queue_sets.h" +#include "build/build_config.h" namespace base { namespace sequence_manager { @@ -200,8 +202,31 @@ bool WorkQueue::RemoveAllCanceledTasksFromFront() { if (!work_queue_sets_) return false; bool task_removed = false; - while (!tasks_.empty() && - (!tasks_.front().task || tasks_.front().task.IsCancelled())) { + while (!tasks_.empty()) { + const auto& pending_task = tasks_.front(); +#if !defined(OS_NACL) + // Record some debugging information about the task. + // TODO(skyostil): Remove once crbug.com/1071475 is resolved. + DEBUG_ALIAS_FOR_CSTR(debug_file_name, + pending_task.posted_from.file_name() + ? pending_task.posted_from.file_name() + : "", + 16); + DEBUG_ALIAS_FOR_CSTR(debug_function_name, + pending_task.posted_from.function_name() + ? 
pending_task.posted_from.function_name() + : "", + 16); + int debug_line_number = pending_task.posted_from.line_number(); + const void* debug_pc = pending_task.posted_from.program_counter(); + const void* debug_bind_state = + reinterpret_cast<const void*>(&pending_task.task); + base::debug::Alias(&debug_line_number); + base::debug::Alias(&debug_pc); + base::debug::Alias(&debug_bind_state); +#endif // !defined(OS_NACL) + if (pending_task.task && !pending_task.task.IsCancelled()) + break; tasks_.pop_front(); task_removed = true; } diff --git a/chromium/base/task/sequence_manager/work_queue_sets.cc b/chromium/base/task/sequence_manager/work_queue_sets.cc index 68ec9613338..937b84b23d6 100644 --- a/chromium/base/task/sequence_manager/work_queue_sets.cc +++ b/chromium/base/task/sequence_manager/work_queue_sets.cc @@ -4,7 +4,7 @@ #include "base/task/sequence_manager/work_queue_sets.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace sequence_manager { diff --git a/chromium/base/task/task_traits.cc b/chromium/base/task/task_traits.cc index 4ebb87a782c..5f178bc2f1b 100644 --- a/chromium/base/task/task_traits.cc +++ b/chromium/base/task/task_traits.cc @@ -8,7 +8,7 @@ #include <ostream> -#include "base/logging.h" +#include "base/notreached.h" namespace base { diff --git a/chromium/base/task/task_traits.h b/chromium/base/task/task_traits.h index 04f63434121..ac2e3a89a2d 100644 --- a/chromium/base/task/task_traits.h +++ b/chromium/base/task/task_traits.h @@ -37,11 +37,12 @@ class PostTaskAndroid; enum class TaskPriority : uint8_t { // This will always be equal to the lowest priority available. LOWEST = 0, - // This task will only start running when machine resources are available. The - // application may preempt the task if it expects that resources will soon be - // needed by work of higher priority. Dependending on the ThreadPolicy, the - // task may run on a thread that is likely to be descheduled when higher - // priority work arrives (in this process or another). + // Best effort tasks will only start running when machine resources are + // available. The application may preempt best effort tasks if it expects that + // resources will soon be needed by work of higher priority. Depending on + // the ThreadPolicy, best effort tasks may run on a thread that is likely to + // be descheduled when higher priority work arrives (in this process or + // another). // // Examples: // - Reporting metrics. @@ -51,7 +52,7 @@ enum class TaskPriority : uint8_t { // when that user interactions happens). BEST_EFFORT = LOWEST, - // The result of this task is visible to the user (in the UI or as a + // The result of user visible tasks is visible to the user (in the UI or as a // side-effect on the system) but it is not an immediate response to a user // interaction. // @@ -61,7 +62,7 @@ enum class TaskPriority : uint8_t { // - Loading an image that is displayed in the UI but is non-critical. USER_VISIBLE, - // This task affects UI immediately after a user interaction. + // User blocking tasks affect UI immediately after a user interaction. // // Example: // - Loading and rendering a web page after the user clicks a link. @@ -143,12 +144,16 @@ enum class TaskShutdownBehavior : uint8_t { // thread priority in order to avoid priority inversions. Please consult with // //base/task/OWNERS if you suspect a priority inversion.
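For illustration (not part of the patch), a priority is normally combined with other traits when posting work, in the same style as the PostTask() calls in the perf tests earlier in this change. ReportMetrics() is hypothetical, and this assumes ThreadPolicy is accepted directly as a TaskTraits argument, as its definition in this header suggests:

  // Sketch: a low-priority metrics report that may run on a
  // background-priority thread.
  base::PostTask(FROM_HERE,
                 {base::ThreadPool(), base::TaskPriority::BEST_EFFORT,
                  base::ThreadPolicy::PREFER_BACKGROUND},
                 base::BindOnce(&ReportMetrics));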
enum class ThreadPolicy : uint8_t { - // The task runs at background thread priority if: + // The task runs on a background priority thread if: // - The TaskPriority is BEST_EFFORT. // - Background thread priority is supported by the platform (see // environment_config_unittest.cc). // - No extension trait (e.g. BrowserThread) is used. - // Otherwise, it runs at normal thread priority. + // - ThreadPoolInstance::Shutdown() hadn't been called when the task started running. + // (Remaining TaskShutdownBehavior::BLOCK_SHUTDOWN tasks use foreground + // threads during shutdown regardless of TaskPriority) + // Otherwise, it runs on a normal priority thread. + // This is the default. PREFER_BACKGROUND, // The task runs at normal thread priority, irrespective of its TaskPriority. diff --git a/chromium/base/task/thread_pool.cc b/chromium/base/task/thread_pool.cc index f5635d0ea65..24e2bba8a37 100644 --- a/chromium/base/task/thread_pool.cc +++ b/chromium/base/task/thread_pool.cc @@ -4,7 +4,7 @@ #include "base/task/thread_pool.h" -#include "base/logging.h" +#include "base/check.h" #include "base/task/scoped_set_task_priority_for_current_thread.h" #include "base/task/task_traits.h" #include "base/task/thread_pool/thread_pool_impl.h" diff --git a/chromium/base/task/thread_pool.h b/chromium/base/task/thread_pool.h index 67899dfcfe1..085c249ba58 100644 --- a/chromium/base/task/thread_pool.h +++ b/chromium/base/task/thread_pool.h @@ -126,8 +126,8 @@ class BASE_EXPORT ThreadPool { const Location& from_here, CallbackType<TaskReturnType()> task, CallbackType<void(ReplyArgType)> reply) { - return ThreadPool::PostTaskAndReplyWithResult(from_here, std::move(task), - std::move(reply)); + return ThreadPool::PostTaskAndReplyWithResult( + from_here, {}, std::move(task), std::move(reply)); } // Posts |task| with specific |traits|. Returns false if the task definitely diff --git a/chromium/base/task/thread_pool/delayed_task_manager.cc b/chromium/base/task/thread_pool/delayed_task_manager.cc index 3b0d3d5f831..f5a01aed70f 100644 --- a/chromium/base/task/thread_pool/delayed_task_manager.cc +++ b/chromium/base/task/thread_pool/delayed_task_manager.cc @@ -7,7 +7,7 @@ #include <algorithm> #include "base/bind.h" -#include "base/logging.h" +#include "base/check.h" #include "base/sequenced_task_runner.h" #include "base/task/post_task.h" #include "base/task/thread_pool/task.h" diff --git a/chromium/base/task/thread_pool/job_task_source.cc b/chromium/base/task/thread_pool/job_task_source.cc index 77039dcbe17..c86e3e0f118 100644 --- a/chromium/base/task/thread_pool/job_task_source.cc +++ b/chromium/base/task/thread_pool/job_task_source.cc @@ -8,8 +8,9 @@ #include "base/bind.h" #include "base/bind_helpers.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/memory/ptr_util.h" +#include "base/task/common/checked_lock.h" #include "base/task/task_features.h" #include "base/task/thread_pool/pooled_task_runner_delegate.h" #include "base/threading/thread_restrictions.h" @@ -145,6 +146,7 @@ JobTaskSource::JobTaskSource( worker_task_(std::move(worker_task)), primary_task_(base::BindRepeating( [](JobTaskSource* self) { + CheckedLock::AssertNoLockHeldOnCurrentThread(); // Each worker task has its own delegate with associated state. 
JobDelegate job_delegate{self, self->delegate_}; self->worker_task_.Run(&job_delegate); diff --git a/chromium/base/task/thread_pool/priority_queue.cc b/chromium/base/task/thread_pool/priority_queue.cc index bbef766775a..e2798b7009a 100644 --- a/chromium/base/task/thread_pool/priority_queue.cc +++ b/chromium/base/task/thread_pool/priority_queue.cc @@ -6,7 +6,7 @@ #include <utility> -#include "base/logging.h" +#include "base/check_op.h" #include "base/memory/ptr_util.h" #include "base/stl_util.h" diff --git a/chromium/base/task/thread_pool/sequence.cc b/chromium/base/task/thread_pool/sequence.cc index 25a76efc6c4..8ed7dd4881e 100644 --- a/chromium/base/task/thread_pool/sequence.cc +++ b/chromium/base/task/thread_pool/sequence.cc @@ -7,9 +7,9 @@ #include <utility> #include "base/bind.h" +#include "base/check.h" #include "base/critical_closure.h" #include "base/feature_list.h" -#include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/task/task_features.h" #include "base/time/time.h" @@ -45,7 +45,8 @@ void Sequence::Transaction::PushTask(Task task) { task.task = sequence()->traits_.shutdown_behavior() == TaskShutdownBehavior::BLOCK_SHUTDOWN - ? MakeCriticalClosure(std::move(task.task)) + ? MakeCriticalClosure(task.posted_from.ToString(), + std::move(task.task)) : std::move(task.task); sequence()->queue_.push(std::move(task)); diff --git a/chromium/base/task/thread_pool/service_thread.cc b/chromium/base/task/thread_pool/service_thread.cc index 2b94745dd88..6b4afa9af78 100644 --- a/chromium/base/task/thread_pool/service_thread.cc +++ b/chromium/base/task/thread_pool/service_thread.cc @@ -57,8 +57,9 @@ void ServiceThread::Init() { } NOINLINE void ServiceThread::Run(RunLoop* run_loop) { - const int line_number = __LINE__; Thread::Run(run_loop); + // Inhibit tail calls of Run and inhibit code folding. 
+ const int line_number = __LINE__; base::debug::Alias(&line_number); } diff --git a/chromium/base/task/thread_pool/task_source.cc b/chromium/base/task/thread_pool/task_source.cc index 76454c3bd9d..82d7d595cbd 100644 --- a/chromium/base/task/thread_pool/task_source.cc +++ b/chromium/base/task/thread_pool/task_source.cc @@ -6,8 +6,8 @@ #include <utility> +#include "base/check_op.h" #include "base/feature_list.h" -#include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/task/task_features.h" #include "base/task/thread_pool/task_tracker.h" diff --git a/chromium/base/task/thread_pool/task_tracker.cc b/chromium/base/task/thread_pool/task_tracker.cc index bccabc4ca2f..1b02bf0f75a 100644 --- a/chromium/base/task/thread_pool/task_tracker.cc +++ b/chromium/base/task/thread_pool/task_tracker.cc @@ -12,7 +12,6 @@ #include "base/callback.h" #include "base/command_line.h" #include "base/compiler_specific.h" -#include "base/debug/alias.h" #include "base/json/json_writer.h" #include "base/memory/ptr_util.h" #include "base/metrics/histogram_macros.h" @@ -760,21 +759,15 @@ void TaskTracker::CallFlushCallbackForTesting() { } NOINLINE void TaskTracker::RunContinueOnShutdown(Task* task) { - const int line_number = __LINE__; task_annotator_.RunTask("ThreadPool_RunTask_ContinueOnShutdown", task); - base::debug::Alias(&line_number); } NOINLINE void TaskTracker::RunSkipOnShutdown(Task* task) { - const int line_number = __LINE__; task_annotator_.RunTask("ThreadPool_RunTask_SkipOnShutdown", task); - base::debug::Alias(&line_number); } NOINLINE void TaskTracker::RunBlockShutdown(Task* task) { - const int line_number = __LINE__; task_annotator_.RunTask("ThreadPool_RunTask_BlockShutdown", task); - base::debug::Alias(&line_number); } void TaskTracker::RunTaskWithShutdownBehavior( diff --git a/chromium/base/task/thread_pool/task_tracker_unittest.cc b/chromium/base/task/thread_pool/task_tracker_unittest.cc index 8440af8d8e4..e88fa46d274 100644 --- a/chromium/base/task/thread_pool/task_tracker_unittest.cc +++ b/chromium/base/task/thread_pool/task_tracker_unittest.cc @@ -13,7 +13,7 @@ #include "base/bind.h" #include "base/bind_helpers.h" #include "base/callback.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/memory/ptr_util.h" #include "base/memory/ref_counted.h" diff --git a/chromium/base/task/thread_pool/test_task_factory.cc b/chromium/base/task/thread_pool/test_task_factory.cc index c1b84cc16e2..9d36b80ba60 100644 --- a/chromium/base/task/thread_pool/test_task_factory.cc +++ b/chromium/base/task/thread_pool/test_task_factory.cc @@ -7,8 +7,8 @@ #include "base/bind.h" #include "base/bind_helpers.h" #include "base/callback.h" +#include "base/check_op.h" #include "base/location.h" -#include "base/logging.h" #include "base/synchronization/waitable_event.h" #include "base/threading/sequenced_task_runner_handle.h" #include "base/threading/thread_task_runner_handle.h" diff --git a/chromium/base/task/thread_pool/thread_group_impl.cc b/chromium/base/task/thread_pool/thread_group_impl.cc index d41c327169f..ff5efaae886 100644 --- a/chromium/base/task/thread_pool/thread_group_impl.cc +++ b/chromium/base/task/thread_pool/thread_group_impl.cc @@ -185,6 +185,8 @@ class ThreadGroupImpl::ScopedCommandsExecutor // and is woken up immediately after. 
workers_to_start_.ForEachWorker([&](WorkerThread* worker) { worker->Start(outer_->after_start().worker_thread_observer); + if (outer_->worker_started_for_testing_) + outer_->worker_started_for_testing_->Wait(); }); if (must_schedule_adjust_max_tasks_) @@ -413,6 +415,7 @@ void ThreadGroupImpl::Start( scoped_refptr<SequencedTaskRunner> service_thread_task_runner, WorkerThreadObserver* worker_thread_observer, WorkerEnvironment worker_environment, + bool synchronous_thread_start_for_testing, Optional<TimeDelta> may_block_threshold) { DCHECK(!replacement_thread_group_); @@ -447,6 +450,14 @@ void ThreadGroupImpl::Start( in_start().initialized = true; #endif + if (synchronous_thread_start_for_testing) { + worker_started_for_testing_.emplace(WaitableEvent::ResetPolicy::AUTOMATIC); + // Don't emit a ScopedBlockingCallWithBaseSyncPrimitives from this + // WaitableEvent or it defeats the purpose of having threads start without + // externally visible side-effects. + worker_started_for_testing_->declare_only_used_while_idle(); + } + EnsureEnoughWorkersLockRequired(&executor); } @@ -611,6 +622,14 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::OnMainEntry( outer_->BindToCurrentThread(); SetBlockingObserverForCurrentThread(this); + + if (outer_->worker_started_for_testing_) { + // When |worker_started_for_testing_| is set, the thread that starts workers + // should wait for a worker to have started before starting the next one, + // and there should only be one thread that wakes up workers at a time. + DCHECK(!outer_->worker_started_for_testing_->IsSignaled()); + outer_->worker_started_for_testing_->Signal(); + } } RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork( diff --git a/chromium/base/task/thread_pool/thread_group_impl.h b/chromium/base/task/thread_pool/thread_group_impl.h index 7843cab4920..26f7da99dc3 100644 --- a/chromium/base/task/thread_pool/thread_group_impl.h +++ b/chromium/base/task/thread_pool/thread_group_impl.h @@ -72,14 +72,17 @@ class BASE_EXPORT ThreadGroupImpl : public ThreadGroup { // |worker_environment| specifies the environment in which tasks are executed. // |may_block_threshold| is the timeout after which a task in a MAY_BLOCK // ScopedBlockingCall is considered blocked (the thread group will choose an - // appropriate value if none is specified). Can only be called once. CHECKs on - // failure. + // appropriate value if none is specified). + // |synchronous_thread_start_for_testing| is true if this ThreadGroupImpl + // should synchronously wait for OnMainEntry() after starting each worker. Can + // only be called once. CHECKs on failure. void Start(int max_tasks, int max_best_effort_tasks, TimeDelta suggested_reclaim_time, scoped_refptr<SequencedTaskRunner> service_thread_task_runner, WorkerThreadObserver* worker_thread_observer, WorkerEnvironment worker_environment, + bool synchronous_thread_start_for_testing = false, Optional<TimeDelta> may_block_threshold = Optional<TimeDelta>()); // Destroying a ThreadGroupImpl returned by Create() is not allowed in @@ -330,6 +333,11 @@ class BASE_EXPORT ThreadGroupImpl : public ThreadGroup { // Set at the start of JoinForTesting(). bool join_for_testing_started_ GUARDED_BY(lock_) = false; + // Null-opt unless |synchronous_thread_start_for_testing| was true at + // construction. In that case, it's signaled each time + // WorkerThreadDelegateImpl::OnMainEntry() completes. + Optional<WaitableEvent> worker_started_for_testing_; + // Cached HistogramBase pointers, can be accessed without // holding |lock_|. 
If |lock_| is held, add new samples using // ThreadGroupImpl::ScopedCommandsExecutor (increase diff --git a/chromium/base/task/thread_pool/thread_group_impl_unittest.cc b/chromium/base/task/thread_pool/thread_group_impl_unittest.cc index ab442ad8a84..85a1e9b5541 100644 --- a/chromium/base/task/thread_pool/thread_group_impl_unittest.cc +++ b/chromium/base/task/thread_pool/thread_group_impl_unittest.cc @@ -104,7 +104,8 @@ class ThreadGroupImplImplTestBase : public ThreadGroup::Delegate { max_tasks, max_best_effort_tasks ? max_best_effort_tasks.value() : max_tasks, suggested_reclaim_time, service_thread_.task_runner(), worker_observer, - ThreadGroup::WorkerEnvironment::NONE, may_block_threshold); + ThreadGroup::WorkerEnvironment::NONE, + /* synchronous_thread_start_for_testing=*/false, may_block_threshold); } void CreateAndStartThreadGroup( diff --git a/chromium/base/task/thread_pool/thread_pool_impl.cc b/chromium/base/task/thread_pool/thread_pool_impl.cc index f24cbe0b1e1..6f3bd2edadc 100644 --- a/chromium/base/task/thread_pool/thread_pool_impl.cc +++ b/chromium/base/task/thread_pool/thread_pool_impl.cc @@ -28,6 +28,7 @@ #include "base/task/thread_pool/task.h" #include "base/task/thread_pool/task_source.h" #include "base/task/thread_pool/thread_group_impl.h" +#include "base/task/thread_pool/worker_thread.h" #include "base/threading/platform_thread.h" #include "base/time/time.h" @@ -61,6 +62,12 @@ bool HasDisableBestEffortTasksSwitch() { switches::kDisableBestEffortTasks); } +// A global variable that can be set from test fixtures while no +// ThreadPoolInstance is active. Global instead of being a member variable to +// avoid having to add a public API to ThreadPoolInstance::InitParams for this +// internal edge case. +bool g_synchronous_thread_start_for_testing = false; + } // namespace ThreadPoolImpl::ThreadPoolImpl(StringPiece histogram_label) @@ -151,6 +158,8 @@ void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params, #endif service_thread_options.timer_slack = TIMER_SLACK_MAXIMUM; CHECK(service_thread_->StartWithOptions(service_thread_options)); + if (g_synchronous_thread_start_for_testing) + service_thread_->WaitUntilThreadStarted(); #if defined(OS_POSIX) && !defined(OS_NACL_SFI) // Needs to happen after starting the service thread to get its @@ -203,7 +212,8 @@ void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params, static_cast<ThreadGroupImpl*>(foreground_thread_group_.get()) ->Start(init_params.max_num_foreground_threads, max_best_effort_tasks, suggested_reclaim_time, service_thread_task_runner, - worker_thread_observer, worker_environment); + worker_thread_observer, worker_environment, + g_synchronous_thread_start_for_testing); } if (background_thread_group_) { @@ -217,7 +227,8 @@ void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params, ? 
ThreadGroup::WorkerEnvironment::NONE : #endif - worker_environment); + worker_environment, + g_synchronous_thread_start_for_testing); } started_ = true; @@ -280,6 +291,12 @@ void ThreadPoolImpl::ProcessRipeDelayedTasksForTesting() { delayed_task_manager_.ProcessRipeTasks(); } +// static +void ThreadPoolImpl::SetSynchronousThreadStartForTesting(bool enabled) { + DCHECK(!ThreadPoolInstance::Get()); + g_synchronous_thread_start_for_testing = enabled; +} + int ThreadPoolImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated( const TaskTraits& traits) const { // This method does not support getting the maximum number of BEST_EFFORT diff --git a/chromium/base/task/thread_pool/thread_pool_impl.h b/chromium/base/task/thread_pool/thread_pool_impl.h index f6bbdd22f62..57a23fd0e0e 100644 --- a/chromium/base/task/thread_pool/thread_pool_impl.h +++ b/chromium/base/task/thread_pool/thread_pool_impl.h @@ -119,6 +119,13 @@ class BASE_EXPORT ThreadPoolImpl : public ThreadPoolInstance, // advances faster than the real-time delay on ServiceThread). void ProcessRipeDelayedTasksForTesting(); + // Requests that all threads started by future ThreadPoolImpls in this process + // have a synchronous start (if |enabled|; cancels this behavior otherwise). + // Must be called while no ThreadPoolImpls are alive in this process. This is + // exposed here on this internal API rather than as a ThreadPoolInstance + // configuration param because only one internal test truly needs this. + static void SetSynchronousThreadStartForTesting(bool enabled); + private: // Invoked after |num_fences_| or |num_best_effort_fences_| is updated. Sets // the CanRunPolicy in TaskTracker and wakes up workers as appropriate. diff --git a/chromium/base/task/thread_pool/thread_pool_instance.cc b/chromium/base/task/thread_pool/thread_pool_instance.cc index 4b70ee411ea..ae3549b6a55 100644 --- a/chromium/base/task/thread_pool/thread_pool_instance.cc +++ b/chromium/base/task/thread_pool/thread_pool_instance.cc @@ -6,7 +6,7 @@ #include <algorithm> -#include "base/logging.h" +#include "base/check.h" #include "base/memory/ptr_util.h" #include "base/system/sys_info.h" #include "base/task/thread_pool/thread_pool_impl.h" diff --git a/chromium/base/task/thread_pool/worker_thread.cc b/chromium/base/task/thread_pool/worker_thread.cc index 61b7358e127..79e5105b933 100644 --- a/chromium/base/task/thread_pool/worker_thread.cc +++ b/chromium/base/task/thread_pool/worker_thread.cc @@ -8,9 +8,9 @@ #include <utility> +#include "base/check_op.h" #include "base/compiler_specific.h" #include "base/debug/alias.h" -#include "base/logging.h" #include "base/task/thread_pool/environment_config.h" #include "base/task/thread_pool/task_tracker.h" #include "base/task/thread_pool/worker_thread_observer.h" @@ -220,63 +220,73 @@ void WorkerThread::ThreadMain() { } NOINLINE void WorkerThread::RunPooledWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunBackgroundPooledWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunSharedWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. 
+ const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunBackgroundSharedWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunDedicatedWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunBackgroundDedicatedWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } #if defined(OS_WIN) NOINLINE void WorkerThread::RunSharedCOMWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunBackgroundSharedCOMWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunDedicatedCOMWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } NOINLINE void WorkerThread::RunBackgroundDedicatedCOMWorker() { - const int line_number = __LINE__; RunWorker(); + // Inhibit tail calls of RunWorker and inhibit code folding. + const int line_number = __LINE__; base::debug::Alias(&line_number); } #endif // defined(OS_WIN) diff --git a/chromium/base/task/thread_pool/worker_thread_stack.cc b/chromium/base/task/thread_pool/worker_thread_stack.cc index 40eadde9695..75bc5c6f6fd 100644 --- a/chromium/base/task/thread_pool/worker_thread_stack.cc +++ b/chromium/base/task/thread_pool/worker_thread_stack.cc @@ -6,7 +6,7 @@ #include <algorithm> -#include "base/logging.h" +#include "base/check_op.h" #include "base/stl_util.h" #include "base/task/thread_pool/worker_thread.h" diff --git a/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc b/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc index 1cc192568b9..350f94ad17b 100644 --- a/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc +++ b/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc @@ -4,7 +4,7 @@ #include "base/task/thread_pool/worker_thread_stack.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/memory/ref_counted.h" #include "base/task/thread_pool/task_source.h" #include "base/task/thread_pool/task_tracker.h" diff --git a/chromium/base/task/thread_pool/worker_thread_unittest.cc b/chromium/base/task/thread_pool/worker_thread_unittest.cc index 46516636251..709c38d7767 100644 --- a/chromium/base/task/thread_pool/worker_thread_unittest.cc +++ b/chromium/base/task/thread_pool/worker_thread_unittest.cc @@ -531,6 +531,8 @@ TEST(ThreadPoolWorkerTest, WorkerCleanupFromGetWork) { controls->WaitForWorkToRun(); Mock::VerifyAndClear(delegate); controls->WaitForMainExit(); + // Join the worker to avoid leaks. 
+ worker->JoinForTesting(); } TEST(ThreadPoolWorkerTest, WorkerCleanupDuringWork) { diff --git a/chromium/base/task/thread_pool_unittest.cc b/chromium/base/task/thread_pool_unittest.cc new file mode 100644 index 00000000000..867068bfd9d --- /dev/null +++ b/chromium/base/task/thread_pool_unittest.cc @@ -0,0 +1,44 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/task/thread_pool.h" + +#include "base/bind.h" +#include "base/location.h" +#include "base/run_loop.h" +#include "base/test/bind_test_util.h" +#include "base/test/task_environment.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +TEST(ThreadPool, PostTaskAndReplyWithResultThreeArgs) { + base::test::TaskEnvironment env; + + base::RunLoop run_loop; + base::ThreadPool::PostTaskAndReplyWithResult( + FROM_HERE, base::BindOnce([]() { return 3; }), + base::OnceCallback<void(int)>( + base::BindLambdaForTesting([&run_loop](int x) { + EXPECT_EQ(x, 3); + run_loop.Quit(); + }))); + run_loop.Run(); +} + +TEST(ThreadPool, PostTaskAndReplyWithResultFourArgs) { + base::test::TaskEnvironment env; + + base::RunLoop run_loop; + base::ThreadPool::PostTaskAndReplyWithResult( + FROM_HERE, /*traits=*/{}, base::BindOnce([]() { return 3; }), + base::OnceCallback<void(int)>( + base::BindLambdaForTesting([&run_loop](int x) { + EXPECT_EQ(x, 3); + run_loop.Quit(); + }))); + run_loop.Run(); +} + +} // namespace base diff --git a/chromium/base/task_runner.cc b/chromium/base/task_runner.cc index 77726b9da4f..348c79f35b2 100644 --- a/chromium/base/task_runner.cc +++ b/chromium/base/task_runner.cc @@ -7,8 +7,8 @@ #include <utility> #include "base/bind.h" +#include "base/check.h" #include "base/compiler_specific.h" -#include "base/logging.h" #include "base/threading/post_task_and_reply_impl.h" namespace base { diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn index c48169c1eef..86b11128e51 100644 --- a/chromium/base/test/BUILD.gn +++ b/chromium/base/test/BUILD.gn @@ -472,7 +472,7 @@ if (is_android) { "//base:base_java_test_support", "//testing/android/native_test:native_main_runner_java", "//third_party/android_deps:androidx_annotation_annotation_java", - "//third_party/jsr-305:jsr_305_javalib", + "//third_party/android_deps:com_google_code_findbugs_jsr305_java", ] srcjar_deps = [ ":test_support_java_aidl" ] sources = [ diff --git a/chromium/base/test/DEPS b/chromium/base/test/DEPS new file mode 100644 index 00000000000..131691a74ea --- /dev/null +++ b/chromium/base/test/DEPS @@ -0,0 +1,3 @@ +include_rules = [ + "+third_party/libxml/chromium", +] diff --git a/chromium/base/test/OWNERS b/chromium/base/test/OWNERS new file mode 100644 index 00000000000..08d2b4c340d --- /dev/null +++ b/chromium/base/test/OWNERS @@ -0,0 +1,15 @@ +# Metrics-related test utilites: +per-file *scoped_feature_list*=file://base/metrics/OWNERS + +# Tracing test utilities: +per-file trace_*=file://base/trace_event/OWNERS + +#For Windows-specific test utilities: +per-file *_win*=file://base/win/OWNERS + +# For Android-specific changes: +per-file *android*=file://base/test/android/OWNERS +per-file BUILD.gn=file://base/test/android/OWNERS + +# Linux fontconfig changes +per-file *fontconfig*=file://base/nix/OWNERS diff --git a/chromium/base/test/bind_test_util.cc b/chromium/base/test/bind_test_util.cc new file mode 100644 index 00000000000..9b1cef836e8 --- /dev/null +++ b/chromium/base/test/bind_test_util.cc @@ -0,0 
+1,72 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/bind_test_util.h" + +#include <string> + +#include "base/bind.h" +#include "base/callback.h" +#include "base/location.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace { + +// A helper class for MakeExpectedRunClosure() that fails if it is +// destroyed without Run() having been called. This class may be used +// from multiple threads as long as Run() is called at most once +// before destruction. +class RunChecker { + public: + explicit RunChecker(const Location& location, + StringPiece message, + bool is_repeating) + : location_(location), + message_(message.as_string()), + is_repeating_(is_repeating) {} + + ~RunChecker() { + if (!called_) { + ADD_FAILURE_AT(location_.file_name(), location_.line_number()) + << message_; + } + } + + void Run() { + DCHECK(is_repeating_ || !called_); + called_ = true; + } + + private: + const Location location_; + const std::string message_; + const bool is_repeating_; + bool called_ = false; +}; + +} // namespace + +OnceClosure MakeExpectedRunClosure(const Location& location, + StringPiece message) { + return BindOnce(&RunChecker::Run, + Owned(new RunChecker(location, message, false))); +} + +RepeatingClosure MakeExpectedRunAtLeastOnceClosure(const Location& location, + StringPiece message) { + return BindRepeating(&RunChecker::Run, + Owned(new RunChecker(location, message, true))); +} + +RepeatingClosure MakeExpectedNotRunClosure(const Location& location, + StringPiece message) { + return BindRepeating( + [](const Location& location, StringPiece message) { + ADD_FAILURE_AT(location.file_name(), location.line_number()) << message; + }, + location, message.as_string()); +} + +} // namespace base diff --git a/chromium/base/test/bind_test_util.h b/chromium/base/test/bind_test_util.h new file mode 100644 index 00000000000..85e1f918cff --- /dev/null +++ b/chromium/base/test/bind_test_util.h @@ -0,0 +1,89 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_BIND_TEST_UTIL_H_ +#define BASE_TEST_BIND_TEST_UTIL_H_ + +#include <type_traits> +#include <utility> + +#include "base/bind.h" +#include "base/strings/string_piece.h" + +namespace base { + +class Location; + +namespace internal { + +template <typename Callable, + typename Signature = decltype(&Callable::operator())> +struct HasConstCallOperatorImpl : std::false_type {}; + +template <typename Callable, typename R, typename... Args> +struct HasConstCallOperatorImpl<Callable, R (Callable::*)(Args...) const> + : std::true_type {}; + +template <typename Callable> +constexpr bool HasConstCallOperator = + HasConstCallOperatorImpl<std::decay_t<Callable>>::value; + +template <typename F, typename Signature> +struct BindLambdaHelper; + +template <typename F, typename R, typename... Args> +struct BindLambdaHelper<F, R(Args...)> { + static R Run(const std::decay_t<F>& f, Args... args) { + return f(std::forward<Args>(args)...); + } + + static R RunOnce(std::decay_t<F>&& f, Args... args) { + return f(std::forward<Args>(args)...); + } +}; + +} // namespace internal + +// A variant of BindRepeating() that can bind capturing lambdas for testing. +// This doesn't support extra arguments binding as the lambda itself can do. 
+template <typename Lambda, + std::enable_if_t<internal::HasConstCallOperator<Lambda>>* = nullptr> +decltype(auto) BindLambdaForTesting(Lambda&& lambda) { + using Signature = internal::ExtractCallableRunType<std::decay_t<Lambda>>; + return BindRepeating(&internal::BindLambdaHelper<Lambda, Signature>::Run, + std::forward<Lambda>(lambda)); +} + +// A variant of BindRepeating() that can bind mutable capturing lambdas for +// testing. This doesn't support extra arguments binding as the lambda itself +// can do. Since a mutable lambda potentially can invalidate its state after +// being run once, this method returns a OnceCallback instead of a +// RepeatingCallback. +template <typename Lambda, + std::enable_if_t<!internal::HasConstCallOperator<Lambda>>* = nullptr> +decltype(auto) BindLambdaForTesting(Lambda&& lambda) { + static_assert( + std::is_rvalue_reference<Lambda&&>() && + !std::is_const<std::remove_reference_t<Lambda>>(), + "BindLambdaForTesting requires non-const rvalue for mutable lambda " + "binding. I.e.: base::BindLambdaForTesting(std::move(lambda))."); + using Signature = internal::ExtractCallableRunType<std::decay_t<Lambda>>; + return BindOnce(&internal::BindLambdaHelper<Lambda, Signature>::RunOnce, + std::move(lambda)); +} + +// Returns a closure that fails on destruction if it hasn't been run. +OnceClosure MakeExpectedRunClosure(const Location& location, + StringPiece message = StringPiece()); +RepeatingClosure MakeExpectedRunAtLeastOnceClosure( + const Location& location, + StringPiece message = StringPiece()); + +// Returns a closure that fails the test if run. +RepeatingClosure MakeExpectedNotRunClosure(const Location& location, + StringPiece message = StringPiece()); + +} // namespace base + +#endif // BASE_TEST_BIND_TEST_UTIL_H_ diff --git a/chromium/base/test/clang_profiling.cc b/chromium/base/test/clang_profiling.cc new file mode 100644 index 00000000000..5681a105e60 --- /dev/null +++ b/chromium/base/test/clang_profiling.cc @@ -0,0 +1,26 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/clang_profiling.h" + +#include "base/no_destructor.h" +#include "base/synchronization/lock.h" + +extern "C" int __llvm_profile_dump(void); + +namespace base { + +void WriteClangProfilingProfile() { + // __llvm_profile_dump() guarantees that it will not dump profiling + // information if it is being called twice or more. However, it is not thread + // safe, as it is supposed to be called from atexit() handler rather than + // being called directly from random places. Since we have to call it + // ourselves, we must ensure thread safety in order to prevent duplication of + // profiling counters. + static base::NoDestructor<base::Lock> lock; + base::AutoLock auto_lock(*lock); + __llvm_profile_dump(); +} + +} // namespace base diff --git a/chromium/base/test/clang_profiling.h b/chromium/base/test/clang_profiling.h new file mode 100644 index 00000000000..52f48e59653 --- /dev/null +++ b/chromium/base/test/clang_profiling.h @@ -0,0 +1,28 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
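For illustration, here is a minimal sketch of how the BindLambdaForTesting() and MakeExpectedRunClosure() helpers declared in bind_test_util.h above might be used from a test; the test name and values are hypothetical and not part of this patch.

#include <utility>

#include "base/callback.h"
#include "base/location.h"
#include "base/test/bind_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(BindTestUtilSketch, CapturingLambdaAndRunChecker) {
  int calls = 0;

  // A non-mutable capturing lambda has a const call operator, so
  // BindLambdaForTesting() binds it with BindRepeating() and the result may
  // be run any number of times.
  base::RepeatingClosure increment =
      base::BindLambdaForTesting([&calls] { ++calls; });
  increment.Run();
  increment.Run();
  EXPECT_EQ(2, calls);

  // MakeExpectedRunClosure() fails the test at destruction if the returned
  // closure was never run; running it once keeps the test green.
  base::OnceClosure must_run = base::MakeExpectedRunClosure(FROM_HERE);
  std::move(must_run).Run();
}

A mutable lambda would instead have to be passed as an rvalue (std::move) and would yield a OnceCallback, per the static_assert in the second BindLambdaForTesting() overload above.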
+ +#ifndef BASE_TEST_CLANG_PROFILING_H_ +#define BASE_TEST_CLANG_PROFILING_H_ + +#include "base/clang_profiling_buildflags.h" + +#include "base/base_export.h" + +#if !BUILDFLAG(CLANG_PROFILING) +#error "Clang profiling can only be used if CLANG_PROFILING macro is defined" +#endif + +namespace base { + +// Write out the accumulated code profiling profile to the configured file. +// This is used internally by e.g. base::Process and FATAL logging, to cause +// profiling information to be stored even when performing an "immediate" exit +// (or triggering a debug crash), where the automatic at-exit writer will not +// be invoked. +// This call is thread-safe, and will write profiling data at-most-once. +BASE_EXPORT void WriteClangProfilingProfile(); + +} // namespace base + +#endif // BASE_TEST_CLANG_PROFILING_H_ diff --git a/chromium/base/profiler/module_cache_stub.cc b/chromium/base/test/copy_only_int.cc index 2c9231ad1e6..d135a861fe8 100644 --- a/chromium/base/profiler/module_cache_stub.cc +++ b/chromium/base/test/copy_only_int.cc @@ -2,14 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "base/profiler/module_cache.h" +#include "base/test/copy_only_int.h" namespace base { // static -std::unique_ptr<const ModuleCache::Module> ModuleCache::CreateModuleForAddress( - uintptr_t address) { - return nullptr; -} +int CopyOnlyInt::num_copies_ = 0; } // namespace base diff --git a/chromium/base/test/copy_only_int.h b/chromium/base/test/copy_only_int.h new file mode 100644 index 00000000000..5cd969cf1c2 --- /dev/null +++ b/chromium/base/test/copy_only_int.h @@ -0,0 +1,61 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_COPY_ONLY_INT_H_ +#define BASE_TEST_COPY_ONLY_INT_H_ + +#include "base/macros.h" + +namespace base { + +// A copy-only (not moveable) class that holds an integer. This is designed for +// testing containers. See also MoveOnlyInt. 
+class CopyOnlyInt { + public: + explicit CopyOnlyInt(int data = 1) : data_(data) {} + CopyOnlyInt(const CopyOnlyInt& other) : data_(other.data_) { ++num_copies_; } + ~CopyOnlyInt() { data_ = 0; } + + friend bool operator==(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) { + return lhs.data_ == rhs.data_; + } + + friend bool operator!=(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) { + return !operator==(lhs, rhs); + } + + friend bool operator<(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) { + return lhs.data_ < rhs.data_; + } + + friend bool operator>(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const CopyOnlyInt& lhs, const CopyOnlyInt& rhs) { + return !(lhs < rhs); + } + + int data() const { return data_; } + + static void reset_num_copies() { num_copies_ = 0; } + + static int num_copies() { return num_copies_; } + + private: + volatile int data_; + + static int num_copies_; + + CopyOnlyInt(CopyOnlyInt&&) = delete; + CopyOnlyInt& operator=(CopyOnlyInt&) = delete; +}; + +} // namespace base + +#endif // BASE_TEST_COPY_ONLY_INT_H_ diff --git a/chromium/base/test/fontconfig_util_linux.cc b/chromium/base/test/fontconfig_util_linux.cc index b7bf9e65f70..4f6c1c71349 100644 --- a/chromium/base/test/fontconfig_util_linux.cc +++ b/chromium/base/test/fontconfig_util_linux.cc @@ -9,9 +9,9 @@ #include <memory> #include "base/base_paths.h" +#include "base/check.h" #include "base/environment.h" #include "base/files/file_path.h" -#include "base/logging.h" #include "base/path_service.h" namespace base { diff --git a/chromium/base/test/generate_fontconfig_caches.cc b/chromium/base/test/generate_fontconfig_caches.cc new file mode 100644 index 00000000000..cd01d551ef0 --- /dev/null +++ b/chromium/base/test/generate_fontconfig_caches.cc @@ -0,0 +1,66 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <fontconfig/fontconfig.h> +#include <string.h> +#include <time.h> +#include <utime.h> + +#include <string> + +#include "base/base_paths.h" +#include "base/files/file.h" +#include "base/files/file_path.h" +#include "base/files/file_util.h" +#include "base/path_service.h" +#include "base/strings/strcat.h" +#include "base/test/fontconfig_util_linux.h" + +// GIANT WARNING: The point of this file is to front-load construction of the +// font cache [which takes 600ms] from test run time to compile time. This saves +// 600ms on each test shard which uses the font cache into compile time. The +// problem is that fontconfig cache construction is not intended to be +// deterministic. This executable tries to set some external state to ensure +// determinism. We have no way of guaranteeing that this produces correct +// results, or even has the intended effect. +int main() { + // fontconfig generates a random uuid and uses it to match font folders with + // the font cache. Rather than letting fontconfig generate a random uuid, + // which introduces build non-determinism, we place a fixed uuid in the font + // folder, which fontconfig will use to generate the cache. 
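As an aside, a small hypothetical sketch of the kind of container test that copy_only_int.h above is designed for; the test name is invented, and the expectation relies only on the num_copies() bookkeeping documented in that header.

#include <vector>

#include "base/test/copy_only_int.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(CopyOnlyIntSketch, CountsCopiesIntoAContainer) {
  base::CopyOnlyInt::reset_num_copies();

  base::CopyOnlyInt original(42);
  std::vector<base::CopyOnlyInt> values;
  values.push_back(original);  // CopyOnlyInt is not movable, so this copies.

  EXPECT_EQ(42, values[0].data());
  EXPECT_EQ(1, base::CopyOnlyInt::num_copies());
}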
+ base::FilePath dir_module; + base::PathService::Get(base::DIR_MODULE, &dir_module); + base::FilePath uuid_file_path = + dir_module.Append("test_fonts").Append(".uuid"); + const char uuid[] = "fb5c91b2895aa445d23aebf7f9e2189c"; + WriteFile(uuid_file_path, uuid); + + // fontconfig writes the mtime of the test_fonts directory into the cache. It + // presumably checks this later to ensure that the cache is still up to date. + // We set the mtime to an arbitrary, fixed time in the past. + base::FilePath test_fonts_file_path = dir_module.Append("test_fonts"); + base::stat_wrapper_t old_times; + struct utimbuf new_times; + + base::File::Stat(test_fonts_file_path.value().c_str(), &old_times); + new_times.actime = old_times.st_atime; + // Use an arbitrary, fixed time. + new_times.modtime = 123456789; + utime(test_fonts_file_path.value().c_str(), &new_times); + + base::FilePath fontconfig_caches = dir_module.Append("fontconfig_caches"); + + // Delete directory before generating fontconfig caches. This will notify + // future fontconfig_caches changes. + CHECK(base::DeleteFileRecursively(fontconfig_caches)); + + base::SetUpFontconfig(); + FcInit(); + FcFini(); + + // Check existence of intended fontconfig cache file. + CHECK(base::PathExists( + fontconfig_caches.Append(base::StrCat({uuid, "-le64.cache-7"})))); + return 0; +} diff --git a/chromium/base/test/gmock_callback_support.h b/chromium/base/test/gmock_callback_support.h new file mode 100644 index 00000000000..61b880d9475 --- /dev/null +++ b/chromium/base/test/gmock_callback_support.h @@ -0,0 +1,150 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_GMOCK_CALLBACK_SUPPORT_H_ +#define BASE_TEST_GMOCK_CALLBACK_SUPPORT_H_ + +#include <functional> +#include <tuple> +#include <utility> + +#include "base/callback.h" +#include "base/memory/ref_counted.h" +#include "base/memory/scoped_refptr.h" +#include "testing/gmock/include/gmock/gmock.h" + +namespace base { +namespace test { + +// TODO(crbug.com/752720): Simplify using std::apply once C++17 is available. +template <typename CallbackFunc, typename ArgTuple, size_t... I> +decltype(auto) RunOnceCallbackUnwrapped(CallbackFunc&& f, + ArgTuple&& t, + std::index_sequence<I...>) { + return std::move(f).Run(std::get<I>(t)...); +} + +// TODO(crbug.com/752720): Simplify using std::apply once C++17 is available. +template <typename CallbackFunc, typename ArgTuple, size_t... I> +decltype(auto) RunRepeatingCallbackUnwrapped(CallbackFunc&& f, + ArgTuple&& t, + std::index_sequence<I...>) { + return f.Run(std::get<I>(t)...); +} + +// Functor used for RunOnceClosure<N>() and RunOnceCallback<N>() actions. +template <size_t I, typename... Vals> +struct RunOnceCallbackAction { + std::tuple<Vals...> vals; + + template <typename... Args> + decltype(auto) operator()(Args&&... args) { + constexpr size_t size = std::tuple_size<decltype(vals)>::value; + return RunOnceCallbackUnwrapped( + std::get<I>(std::forward_as_tuple(std::forward<Args>(args)...)), + std::move(vals), std::make_index_sequence<size>{}); + } +}; + +// Functor used for RunClosure<N>() and RunCallback<N>() actions. +template <size_t I, typename... Vals> +struct RunRepeatingCallbackAction { + std::tuple<Vals...> vals; + + template <typename... Args> + decltype(auto) operator()(Args&&... 
args) { + constexpr size_t size = std::tuple_size<decltype(vals)>::value; + return RunRepeatingCallbackUnwrapped( + std::get<I>(std::forward_as_tuple(std::forward<Args>(args)...)), + std::move(vals), std::make_index_sequence<size>{}); + } +}; + +// Matchers for base::{Once,Repeating}Callback and +// base::{Once,Repeating}Closure. +MATCHER(IsNullCallback, "a null callback") { + return (arg.is_null()); +} + +MATCHER(IsNotNullCallback, "a non-null callback") { + return (!arg.is_null()); +} + +// The Run[Once]Closure() action invokes the Run() method on the closure +// provided when the action is constructed. Function arguments passed when the +// action is run will be ignored. +ACTION_P(RunClosure, closure) { + closure.Run(); +} + +// This action can be invoked at most once. Any further invocation will trigger +// a CHECK failure. +inline auto RunOnceClosure(base::OnceClosure cb) { + // Mock actions need to be copyable, but OnceClosure is not. Wrap the closure + // in a base::RefCountedData<> to allow it to be copied. An alternative would + // be to use AdaptCallbackForRepeating(), but that allows the closure to be + // run more than once and silently ignores any invocation after the first. + // Since this is for use by tests, it's better to crash or CHECK-fail and + // surface the incorrect usage, rather than have a silent unexpected success. + using RefCountedOnceClosure = base::RefCountedData<base::OnceClosure>; + scoped_refptr<RefCountedOnceClosure> copyable_cb = + base::MakeRefCounted<RefCountedOnceClosure>(std::move(cb)); + return [copyable_cb](auto&&...) { + CHECK(copyable_cb->data); + std::move(copyable_cb->data).Run(); + }; +} + +// The Run[Once]Closure<N>() action invokes the Run() method on the N-th +// (0-based) argument of the mock function. +template <size_t I> +RunRepeatingCallbackAction<I> RunClosure() { + return {}; +} + +template <size_t I> +RunOnceCallbackAction<I> RunOnceClosure() { + return {}; +} + +// The Run[Once]Callback<N>(p1, p2, ..., p_k) action invokes the Run() method on +// the N-th (0-based) argument of the mock function, with arguments p1, p2, ..., +// p_k. +// +// Notes: +// +// 1. The arguments are passed by value by default. If you need to +// pass an argument by reference, wrap it inside ByRef(). For example, +// +// RunCallback<1>(5, string("Hello"), ByRef(foo)) +// +// passes 5 and string("Hello") by value, and passes foo by reference. +// +// 2. If the callback takes an argument by reference but ByRef() is +// not used, it will receive the reference to a copy of the value, +// instead of the original value. For example, when the 0-th +// argument of the callback takes a const string&, the action +// +// RunCallback<0>(string("Hello")) +// +// makes a copy of the temporary string("Hello") object and passes a +// reference of the copy, instead of the original temporary object, +// to the callback. This makes it easy for a user to define an +// RunCallback action from temporary values and have it performed later. +template <size_t I, typename... Vals> +RunOnceCallbackAction<I, std::decay_t<Vals>...> RunOnceCallback( + Vals&&... vals) { + return {std::forward_as_tuple(std::forward<Vals>(vals)...)}; +} + +template <size_t I, typename... Vals> +RunRepeatingCallbackAction<I, std::decay_t<Vals>...> RunCallback( + Vals&&... 
vals) { + return {std::forward_as_tuple(std::forward<Vals>(vals)...)}; +} + +} // namespace test +} // namespace base + +#endif // BASE_TEST_GMOCK_CALLBACK_SUPPORT_H_ diff --git a/chromium/base/test/gmock_callback_support_unittest.cc b/chromium/base/test/gmock_callback_support_unittest.cc new file mode 100644 index 00000000000..5aa95595427 --- /dev/null +++ b/chromium/base/test/gmock_callback_support_unittest.cc @@ -0,0 +1,178 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/gmock_callback_support.h" + +#include "base/bind.h" +#include "base/callback.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +using testing::ByRef; +using testing::MockFunction; + +namespace base { +namespace test { + +using TestCallback = base::RepeatingCallback<void(const bool& src, bool* dst)>; +using TestOnceCallback = base::OnceCallback<void(const bool& src, bool* dst)>; + +void SetBool(const bool& src, bool* dst) { + *dst = src; +} + +TEST(GmockCallbackSupportTest, IsNullCallback) { + MockFunction<void(const TestCallback&)> check; + EXPECT_CALL(check, Call(IsNullCallback())); + check.Call(TestCallback()); +} + +TEST(GmockCallbackSupportTest, IsNotNullCallback) { + MockFunction<void(const TestCallback&)> check; + EXPECT_CALL(check, Call(IsNotNullCallback())); + check.Call(base::BindRepeating(&SetBool)); +} + +TEST(GmockCallbackSupportTest, IsNullOnceCallback) { + MockFunction<void(TestOnceCallback)> mock; + EXPECT_CALL(mock, Call(IsNullCallback())); + mock.Call(TestOnceCallback()); +} + +TEST(GmockCallbackSupportTest, IsNotNullOnceCallback) { + MockFunction<void(TestOnceCallback)> mock; + EXPECT_CALL(mock, Call(IsNotNullCallback())); + mock.Call(base::BindOnce(&SetBool)); +} + +TEST(GmockCallbackSupportTest, RunClosure0) { + MockFunction<void(const base::RepeatingClosure&)> check; + bool dst = false; + EXPECT_CALL(check, Call(IsNotNullCallback())).WillOnce(RunClosure<0>()); + check.Call(base::BindRepeating(&SetBool, true, &dst)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunClosureByRefNotReset) { + // Check that RepeatingClosure isn't reset by RunClosure<N>(). 
+ MockFunction<void(base::RepeatingClosure&)> check; + bool dst = false; + EXPECT_CALL(check, Call(IsNotNullCallback())).WillOnce(RunClosure<0>()); + auto closure = base::BindRepeating(&SetBool, true, &dst); + check.Call(closure); + EXPECT_TRUE(dst); + EXPECT_FALSE(closure.is_null()); +} + +TEST(GmockCallbackSupportTest, RunCallback0) { + MockFunction<void(const TestCallback&)> check; + bool dst = false; + EXPECT_CALL(check, Call(IsNotNullCallback())) + .WillOnce(RunCallback<0>(true, &dst)); + check.Call(base::BindRepeating(&SetBool)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunCallback1) { + MockFunction<void(int, const TestCallback&)> check; + bool dst = false; + EXPECT_CALL(check, Call(0, IsNotNullCallback())) + .WillOnce(RunCallback<1>(true, &dst)); + check.Call(0, base::BindRepeating(&SetBool)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunCallbackPassByRef) { + MockFunction<void(const TestCallback&)> check; + bool dst = false; + bool src = false; + EXPECT_CALL(check, Call(IsNotNullCallback())) + .WillOnce(RunCallback<0>(ByRef(src), &dst)); + src = true; + check.Call(base::BindRepeating(&SetBool)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunCallbackPassByValue) { + MockFunction<void(const TestCallback&)> check; + bool dst = false; + bool src = true; + EXPECT_CALL(check, Call(IsNotNullCallback())) + .WillOnce(RunCallback<0>(src, &dst)); + src = false; + check.Call(base::BindRepeating(&SetBool)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunOnceClosure0) { + MockFunction<void(base::OnceClosure)> check; + bool dst = false; + EXPECT_CALL(check, Call(IsNotNullCallback())).WillOnce(RunOnceClosure<0>()); + check.Call(base::BindOnce(&SetBool, true, &dst)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunOnceCallback0) { + MockFunction<void(TestOnceCallback)> check; + bool dst = false; + bool src = true; + EXPECT_CALL(check, Call(IsNotNullCallback())) + .WillOnce(RunOnceCallback<0>(src, &dst)); + src = false; + check.Call(base::BindOnce(&SetBool)); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunClosureValue) { + MockFunction<void()> check; + bool dst = false; + EXPECT_CALL(check, Call()) + .WillOnce(RunClosure(base::BindRepeating(&SetBool, true, &dst))); + check.Call(); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunClosureValueWithArgs) { + MockFunction<void(bool, int)> check; + bool dst = false; + EXPECT_CALL(check, Call(true, 42)) + .WillOnce(RunClosure(base::BindRepeating(&SetBool, true, &dst))); + check.Call(true, 42); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunOnceClosureValue) { + MockFunction<void()> check; + bool dst = false; + EXPECT_CALL(check, Call()) + .WillOnce(RunOnceClosure(base::BindOnce(&SetBool, true, &dst))); + check.Call(); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunOnceClosureValueWithArgs) { + MockFunction<void(bool, int)> check; + bool dst = false; + EXPECT_CALL(check, Call(true, 42)) + .WillOnce(RunOnceClosure(base::BindOnce(&SetBool, true, &dst))); + check.Call(true, 42); + EXPECT_TRUE(dst); +} + +TEST(GmockCallbackSupportTest, RunOnceClosureValueMultipleCall) { + MockFunction<void()> check; + bool dst = false; + EXPECT_CALL(check, Call()) + .WillRepeatedly(RunOnceClosure(base::BindOnce(&SetBool, true, &dst))); + check.Call(); + EXPECT_TRUE(dst); + + // Invoking the RunOnceClosure action more than once will trigger a + // CHECK-failure. 
+ dst = false; + EXPECT_DEATH_IF_SUPPORTED(check.Call(), ""); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/gmock_move_support.h b/chromium/base/test/gmock_move_support.h new file mode 100644 index 00000000000..8af9b219abe --- /dev/null +++ b/chromium/base/test/gmock_move_support.h @@ -0,0 +1,20 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_GMOCK_MOVE_SUPPORT_H_ +#define BASE_TEST_GMOCK_MOVE_SUPPORT_H_ + +#include <tuple> +#include <utility> + +// A similar action as testing::SaveArg, but it does an assignment with +// std::move() instead of always performing a copy. +template <size_t I = 0, typename T> +auto MoveArg(T* out) { + return [out](auto&&... args) { + *out = std::move(std::get<I>(std::tie(args...))); + }; +} + +#endif // BASE_TEST_GMOCK_MOVE_SUPPORT_H_ diff --git a/chromium/base/test/gmock_move_support_unittest.cc b/chromium/base/test/gmock_move_support_unittest.cc new file mode 100644 index 00000000000..e72fed77ca6 --- /dev/null +++ b/chromium/base/test/gmock_move_support_unittest.cc @@ -0,0 +1,60 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/gmock_move_support.h" + +#include <memory> + +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +using ::testing::DoAll; +using ::testing::Pointee; + +using MoveOnly = std::unique_ptr<int>; + +struct MockFoo { + MOCK_METHOD(void, ByRef, (MoveOnly&), ()); + MOCK_METHOD(void, ByVal, (MoveOnly), ()); + MOCK_METHOD(void, TwiceByRef, (MoveOnly&, MoveOnly&), ()); +}; +} // namespace + +TEST(GmockMoveSupportTest, MoveArgByRef) { + MoveOnly result; + + MockFoo foo; + EXPECT_CALL(foo, ByRef).WillOnce(MoveArg(&result)); + MoveOnly arg = std::make_unique<int>(123); + foo.ByRef(arg); + + EXPECT_THAT(result, Pointee(123)); +} + +TEST(GmockMoveSupportTest, MoveArgByVal) { + MoveOnly result; + + MockFoo foo; + EXPECT_CALL(foo, ByVal).WillOnce(MoveArg(&result)); + foo.ByVal(std::make_unique<int>(456)); + + EXPECT_THAT(result, Pointee(456)); +} + +TEST(GmockMoveSupportTest, MoveArgsTwiceByRef) { + MoveOnly result1; + MoveOnly result2; + + MockFoo foo; + EXPECT_CALL(foo, TwiceByRef) + .WillOnce(DoAll(MoveArg<0>(&result1), MoveArg<1>(&result2))); + MoveOnly arg1 = std::make_unique<int>(123); + MoveOnly arg2 = std::make_unique<int>(456); + foo.TwiceByRef(arg1, arg2); + + EXPECT_THAT(result1, Pointee(123)); + EXPECT_THAT(result2, Pointee(456)); +} diff --git a/chromium/base/test/gtest_util.cc b/chromium/base/test/gtest_util.cc new file mode 100644 index 00000000000..5eeda3958b7 --- /dev/null +++ b/chromium/base/test/gtest_util.cc @@ -0,0 +1,111 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
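For context, a hypothetical sketch of the gmock actions from gmock_callback_support.h above applied to a mock whose method reports its result through a OnceCallback; the MockNameLoader interface is invented purely for illustration.

#include <string>

#include "base/bind.h"
#include "base/callback.h"
#include "base/test/gmock_callback_support.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

class MockNameLoader {
 public:
  MOCK_METHOD(void, LoadName, (base::OnceCallback<void(const std::string&)>), ());
};

TEST(GmockCallbackSupportSketch, RunOnceCallbackInvokesTheArgument) {
  MockNameLoader loader;
  // When LoadName() is called, run its 0-th argument (the callback) with the
  // canned value "alice". The value is copied into the action up front.
  EXPECT_CALL(loader, LoadName)
      .WillOnce(base::test::RunOnceCallback<0>(std::string("alice")));

  std::string result;
  loader.LoadName(base::BindOnce(
      [](std::string* out, const std::string& name) { *out = name; }, &result));
  EXPECT_EQ("alice", result);
}

RunCallback<N>() works the same way for RepeatingCallback arguments, and MoveArg<N>() from gmock_move_support.h above can capture a move-only argument for later inspection instead of running it.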
+ +#include "base/test/gtest_util.h" + +#include <stddef.h> + +#include <memory> + +#include "base/files/file_path.h" +#include "base/json/json_file_value_serializer.h" +#include "base/strings/string_util.h" +#include "base/values.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +TestIdentifier::TestIdentifier() = default; + +TestIdentifier::TestIdentifier(const TestIdentifier& other) = default; + +std::string FormatFullTestName(const std::string& test_case_name, + const std::string& test_name) { + return test_case_name + "." + test_name; +} + +std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name) { + std::string test_name_no_disabled(full_test_name); + ReplaceSubstringsAfterOffset(&test_name_no_disabled, 0, "DISABLED_", ""); + return test_name_no_disabled; +} + +std::vector<TestIdentifier> GetCompiledInTests() { + testing::UnitTest* const unit_test = testing::UnitTest::GetInstance(); + + std::vector<TestIdentifier> tests; + for (int i = 0; i < unit_test->total_test_case_count(); ++i) { + const testing::TestCase* test_case = unit_test->GetTestCase(i); + for (int j = 0; j < test_case->total_test_count(); ++j) { + const testing::TestInfo* test_info = test_case->GetTestInfo(j); + TestIdentifier test_data; + test_data.test_case_name = test_case->name(); + test_data.test_name = test_info->name(); + test_data.file = test_info->file(); + test_data.line = test_info->line(); + tests.push_back(test_data); + } + } + return tests; +} + +bool WriteCompiledInTestsToFile(const FilePath& path) { + std::vector<TestIdentifier> tests(GetCompiledInTests()); + + ListValue root; + for (const auto& i : tests) { + std::unique_ptr<DictionaryValue> test_info(new DictionaryValue); + test_info->SetStringKey("test_case_name", i.test_case_name); + test_info->SetStringKey("test_name", i.test_name); + test_info->SetStringKey("file", i.file); + test_info->SetIntKey("line", i.line); + root.Append(std::move(test_info)); + } + + JSONFileValueSerializer serializer(path); + return serializer.Serialize(root); +} + +bool ReadTestNamesFromFile(const FilePath& path, + std::vector<TestIdentifier>* output) { + JSONFileValueDeserializer deserializer(path); + int error_code = 0; + std::string error_message; + std::unique_ptr<base::Value> value = + deserializer.Deserialize(&error_code, &error_message); + if (!value.get()) + return false; + + base::ListValue* tests = nullptr; + if (!value->GetAsList(&tests)) + return false; + + std::vector<base::TestIdentifier> result; + for (const auto& i : *tests) { + const base::DictionaryValue* test = nullptr; + if (!i.GetAsDictionary(&test)) + return false; + + TestIdentifier test_data; + + if (!test->GetStringASCII("test_case_name", &test_data.test_case_name)) + return false; + + if (!test->GetStringASCII("test_name", &test_data.test_name)) + return false; + + if (!test->GetStringASCII("file", &test_data.file)) + return false; + + if (!test->GetInteger("line", &test_data.line)) + return false; + + result.push_back(test_data); + } + + output->swap(result); + return true; +} + +} // namespace base diff --git a/chromium/base/test/gtest_util.h b/chromium/base/test/gtest_util.h new file mode 100644 index 00000000000..1db1fae1e2d --- /dev/null +++ b/chromium/base/test/gtest_util.h @@ -0,0 +1,114 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef BASE_TEST_GTEST_UTIL_H_ +#define BASE_TEST_GTEST_UTIL_H_ + +#include <string> +#include <utility> +#include <vector> + +#include "base/compiler_specific.h" +#include "base/logging.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +// EXPECT/ASSERT_DCHECK_DEATH is intended to replace EXPECT/ASSERT_DEBUG_DEATH +// when the death is expected to be caused by a DCHECK. Contrary to +// EXPECT/ASSERT_DEBUG_DEATH however, it doesn't execute the statement in non- +// dcheck builds as DCHECKs are intended to catch things that should never +// happen and as such executing the statement results in undefined behavior +// (|statement| is compiled in unsupported configurations nonetheless). +// Death tests misbehave on Android. +#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) + +// EXPECT/ASSERT_DCHECK_DEATH tests verify that a DCHECK is hit ("Check failed" +// is part of the error message), but intentionally do not expose the gtest +// death test's full |regex| parameter to avoid users having to verify the exact +// syntax of the error message produced by the DCHECK. + +// Official builds will eat stream parameters, so don't check the error message. +#if defined(OFFICIAL_BUILD) && defined(NDEBUG) +#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "") +#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "") +#else +#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed") +#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed") +#endif // defined(OFFICIAL_BUILD) && defined(NDEBUG) + +#else +// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) + +#define EXPECT_DCHECK_DEATH(statement) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", ) +#define ASSERT_DCHECK_DEATH(statement) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, "Check failed", return) + +#endif +// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) + +// As above, but for CHECK(). +#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) + +// Official builds will eat stream parameters, so don't check the error message. +#if defined(OFFICIAL_BUILD) && defined(NDEBUG) +#define EXPECT_CHECK_DEATH(statement) EXPECT_DEATH(statement, "") +#define ASSERT_CHECK_DEATH(statement) ASSERT_DEATH(statement, "") +#else +#define EXPECT_CHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed") +#define ASSERT_CHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed") +#endif // defined(OFFICIAL_BUILD) && defined(NDEBUG) + +#else // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) + +// Note GTEST_UNSUPPORTED_DEATH_TEST takes a |regex| only to see whether it is a +// valid regex. It is never evaluated. +#define EXPECT_CHECK_DEATH(statement) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, "", ) +#define ASSERT_CHECK_DEATH(statement) \ + GTEST_UNSUPPORTED_DEATH_TEST(statement, "", return ) + +#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID) + +namespace base { + +class FilePath; + +struct TestIdentifier { + TestIdentifier(); + TestIdentifier(const TestIdentifier& other); + + std::string test_case_name; + std::string test_name; + std::string file; + int line; +}; + +// Constructs a full test name given a test case name and a test name, +// e.g. for test case "A" and test name "B" returns "A.B". 
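To make the death-test macros above concrete, a minimal hypothetical sketch; the helper function below is invented for illustration.

#include "base/logging.h"
#include "base/test/gtest_util.h"
#include "testing/gtest/include/gtest/gtest.h"

int DoubleIfPositive(int value) {
  DCHECK_GT(value, 0);  // Caller contract: |value| must be positive.
  return value * 2;
}

TEST(GTestUtilSketch, DcheckDeath) {
  // In DCHECK-enabled builds this forks and expects a "Check failed" death;
  // in non-DCHECK builds the statement is compiled but never executed.
  EXPECT_DCHECK_DEATH(DoubleIfPositive(-1));
  EXPECT_EQ(4, DoubleIfPositive(2));
}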
+std::string FormatFullTestName(const std::string& test_case_name, + const std::string& test_name); + +// Returns the full test name with the "DISABLED_" prefix stripped out. +// e.g. for the full test names "A.DISABLED_B", "DISABLED_A.B", and +// "DISABLED_A.DISABLED_B", returns "A.B". +std::string TestNameWithoutDisabledPrefix(const std::string& full_test_name); + +// Returns a vector of gtest-based tests compiled into +// current executable. +std::vector<TestIdentifier> GetCompiledInTests(); + +// Writes the list of gtest-based tests compiled into +// current executable as a JSON file. Returns true on success. +bool WriteCompiledInTestsToFile(const FilePath& path) WARN_UNUSED_RESULT; + +// Reads the list of gtest-based tests from |path| into |output|. +// Returns true on success. +bool ReadTestNamesFromFile( + const FilePath& path, + std::vector<TestIdentifier>* output) WARN_UNUSED_RESULT; + +} // namespace base + +#endif // BASE_TEST_GTEST_UTIL_H_ diff --git a/chromium/base/test/gtest_xml_unittest_result_printer.cc b/chromium/base/test/gtest_xml_unittest_result_printer.cc new file mode 100644 index 00000000000..709450b5329 --- /dev/null +++ b/chromium/base/test/gtest_xml_unittest_result_printer.cc @@ -0,0 +1,165 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/gtest_xml_unittest_result_printer.h" + +#include "base/base64.h" +#include "base/check.h" +#include "base/command_line.h" +#include "base/files/file_util.h" +#include "base/test/test_switches.h" +#include "base/time/time.h" + +namespace base { + +namespace { +const int kDefaultTestPartResultsLimit = 10; + +const char kTestPartLesultsLimitExceeded[] = + "Test part results limit exceeded. Use --test-launcher-test-part-limit to " + "increase or disable limit."; +} // namespace + +XmlUnitTestResultPrinter::XmlUnitTestResultPrinter() + : output_file_(nullptr), open_failed_(false) {} + +XmlUnitTestResultPrinter::~XmlUnitTestResultPrinter() { + if (output_file_ && !open_failed_) { + fprintf(output_file_, "</testsuites>\n"); + fflush(output_file_); + CloseFile(output_file_); + } +} + +bool XmlUnitTestResultPrinter::Initialize(const FilePath& output_file_path) { + DCHECK(!output_file_); + output_file_ = OpenFile(output_file_path, "w"); + if (!output_file_) { + // If the file open fails, we set the output location to stderr. This is + // because in current usage our caller CHECKs the result of this function. + // But that in turn causes a LogMessage that comes back to this object, + // which in turn causes a (double) crash. By pointing at stderr, there might + // be some indication what's going wrong. See https://crbug.com/736783. 
+ output_file_ = stderr; + open_failed_ = true; + return false; + } + + fprintf(output_file_, + "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<testsuites>\n"); + fflush(output_file_); + + return true; +} + +void XmlUnitTestResultPrinter::OnAssert(const char* file, + int line, + const std::string& summary, + const std::string& message) { + WriteTestPartResult(file, line, testing::TestPartResult::kFatalFailure, + summary, message); +} + +void XmlUnitTestResultPrinter::OnTestCaseStart( + const testing::TestCase& test_case) { + fprintf(output_file_, " <testsuite>\n"); + fflush(output_file_); +} + +void XmlUnitTestResultPrinter::OnTestStart( + const testing::TestInfo& test_info) { + // This is our custom extension - it helps to recognize which test was + // running when the test binary crashed. Note that we cannot even open the + // <testcase> tag here - it requires e.g. run time of the test to be known. + fprintf(output_file_, + " <x-teststart name=\"%s\" classname=\"%s\" />\n", + test_info.name(), + test_info.test_case_name()); + fflush(output_file_); +} + +void XmlUnitTestResultPrinter::OnTestEnd(const testing::TestInfo& test_info) { + fprintf(output_file_, + " <testcase name=\"%s\" status=\"run\" time=\"%.3f\"" + " classname=\"%s\">\n", + test_info.name(), + static_cast<double>(test_info.result()->elapsed_time()) / + Time::kMillisecondsPerSecond, + test_info.test_case_name()); + if (test_info.result()->Failed()) { + fprintf(output_file_, + " <failure message=\"\" type=\"\"></failure>\n"); + } + + int limit = test_info.result()->total_part_count(); + if (CommandLine::ForCurrentProcess()->HasSwitch( + switches::kTestLauncherTestPartResultsLimit)) { + std::string limit_str = + CommandLine::ForCurrentProcess()->GetSwitchValueASCII( + switches::kTestLauncherTestPartResultsLimit); + int test_part_results_limit = std::strtol(limit_str.c_str(), nullptr, 10); + if (test_part_results_limit >= 0) + limit = std::min(limit, test_part_results_limit); + } else { + limit = std::min(limit, kDefaultTestPartResultsLimit); + } + + for (int i = 0; i < limit; ++i) { + const auto& test_part_result = test_info.result()->GetTestPartResult(i); + WriteTestPartResult(test_part_result.file_name(), + test_part_result.line_number(), test_part_result.type(), + test_part_result.summary(), test_part_result.message()); + } + + if (test_info.result()->total_part_count() > limit) { + WriteTestPartResult( + "unknown", 0, testing::TestPartResult::kNonFatalFailure, + kTestPartLesultsLimitExceeded, kTestPartLesultsLimitExceeded); + } + + fprintf(output_file_, " </testcase>\n"); + fflush(output_file_); +} + +void XmlUnitTestResultPrinter::OnTestCaseEnd( + const testing::TestCase& test_case) { + fprintf(output_file_, " </testsuite>\n"); + fflush(output_file_); +} + +void XmlUnitTestResultPrinter::WriteTestPartResult( + const char* file, + int line, + testing::TestPartResult::Type result_type, + const std::string& summary, + const std::string& message) { + const char* type = "unknown"; + switch (result_type) { + case testing::TestPartResult::kSuccess: + type = "success"; + break; + case testing::TestPartResult::kNonFatalFailure: + type = "failure"; + break; + case testing::TestPartResult::kFatalFailure: + type = "fatal_failure"; + break; + case testing::TestPartResult::kSkip: + type = "skip"; + break; + } + std::string summary_encoded; + Base64Encode(summary, &summary_encoded); + std::string message_encoded; + Base64Encode(message, &message_encoded); + fprintf(output_file_, + " <x-test-result-part type=\"%s\" file=\"%s\" 
line=\"%d\">\n" + " <summary>%s</summary>\n" + " <message>%s</message>\n" + " </x-test-result-part>\n", + type, file, line, summary_encoded.c_str(), message_encoded.c_str()); + fflush(output_file_); +} + +} // namespace base diff --git a/chromium/base/test/gtest_xml_unittest_result_printer.h b/chromium/base/test/gtest_xml_unittest_result_printer.h new file mode 100644 index 00000000000..93403822cfa --- /dev/null +++ b/chromium/base/test/gtest_xml_unittest_result_printer.h @@ -0,0 +1,55 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_GTEST_XML_UNITTEST_RESULT_PRINTER_H_ +#define BASE_TEST_GTEST_XML_UNITTEST_RESULT_PRINTER_H_ + +#include <stdio.h> + +#include "base/compiler_specific.h" +#include "base/macros.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +class FilePath; + +// Generates an XML output file. Format is very close to GTest, but has +// extensions needed by the test launcher. +class XmlUnitTestResultPrinter : public testing::EmptyTestEventListener { + public: + XmlUnitTestResultPrinter(); + ~XmlUnitTestResultPrinter() override; + + // Must be called before adding as a listener. Returns true on success. + bool Initialize(const FilePath& output_file_path) WARN_UNUSED_RESULT; + + // CHECK/DCHECK failed. Print file/line and message to the xml. + void OnAssert(const char* file, + int line, + const std::string& summary, + const std::string& message); + + private: + // testing::EmptyTestEventListener: + void OnTestCaseStart(const testing::TestCase& test_case) override; + void OnTestStart(const testing::TestInfo& test_info) override; + void OnTestEnd(const testing::TestInfo& test_info) override; + void OnTestCaseEnd(const testing::TestCase& test_case) override; + + void WriteTestPartResult(const char* file, + int line, + testing::TestPartResult::Type type, + const std::string& summary, + const std::string& message); + + FILE* output_file_; + bool open_failed_; + + DISALLOW_COPY_AND_ASSIGN(XmlUnitTestResultPrinter); +}; + +} // namespace base + +#endif // BASE_TEST_GTEST_XML_UNITTEST_RESULT_PRINTER_H_ diff --git a/chromium/base/test/gtest_xml_util.cc b/chromium/base/test/gtest_xml_util.cc new file mode 100644 index 00000000000..1bac5a6b1d2 --- /dev/null +++ b/chromium/base/test/gtest_xml_util.cc @@ -0,0 +1,235 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/gtest_xml_util.h" + +#include <stdint.h> + +#include "base/base64.h" +#include "base/check.h" +#include "base/files/file_util.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/stringprintf.h" +#include "base/test/gtest_util.h" +#include "base/test/launcher/test_launcher.h" +#include "third_party/libxml/chromium/libxml_utils.h" +#include "third_party/libxml/chromium/xml_reader.h" + +namespace base { + +namespace { + +// This is used for the xml parser to report errors. This assumes the context +// is a pointer to a std::string where the error message should be appended. +static void XmlErrorFunc(void *context, const char *message, ...) 
{ + va_list args; + va_start(args, message); + std::string* error = static_cast<std::string*>(context); + StringAppendV(error, message, args); + va_end(args); +} + +} // namespace + +bool ProcessGTestOutput(const base::FilePath& output_file, + std::vector<TestResult>* results, + bool* crashed) { + DCHECK(results); + + std::string xml_contents; + if (!ReadFileToString(output_file, &xml_contents)) + return false; + + // Silence XML errors - otherwise they go to stderr. + std::string xml_errors; + ScopedXmlErrorFunc error_func(&xml_errors, &XmlErrorFunc); + + XmlReader xml_reader; + if (!xml_reader.Load(xml_contents)) + return false; + + enum { + STATE_INIT, + STATE_TESTSUITE, + STATE_TESTCASE, + STATE_TEST_RESULT, + STATE_FAILURE, + STATE_END, + } state = STATE_INIT; + + while (xml_reader.Read()) { + xml_reader.SkipToElement(); + std::string node_name(xml_reader.NodeName()); + + switch (state) { + case STATE_INIT: + if (node_name == "testsuites" && !xml_reader.IsClosingElement()) + state = STATE_TESTSUITE; + else + return false; + break; + case STATE_TESTSUITE: + if (node_name == "testsuites" && xml_reader.IsClosingElement()) + state = STATE_END; + else if (node_name == "testsuite" && !xml_reader.IsClosingElement()) + state = STATE_TESTCASE; + else + return false; + break; + case STATE_TESTCASE: + if (node_name == "testsuite" && xml_reader.IsClosingElement()) { + state = STATE_TESTSUITE; + } else if (node_name == "x-teststart" && + !xml_reader.IsClosingElement()) { + // This is our custom extension that helps recognize which test was + // running when the test binary crashed. + TestResult result; + + std::string test_case_name; + if (!xml_reader.NodeAttribute("classname", &test_case_name)) + return false; + std::string test_name; + if (!xml_reader.NodeAttribute("name", &test_name)) + return false; + result.full_name = FormatFullTestName(test_case_name, test_name); + + result.elapsed_time = TimeDelta(); + + // Assume the test crashed - we can correct that later. + result.status = TestResult::TEST_CRASH; + + results->push_back(result); + } else if (node_name == "testcase" && !xml_reader.IsClosingElement()) { + std::string test_status; + if (!xml_reader.NodeAttribute("status", &test_status)) + return false; + + if (test_status != "run" && test_status != "notrun") + return false; + if (test_status != "run") + break; + + TestResult result; + + std::string test_case_name; + if (!xml_reader.NodeAttribute("classname", &test_case_name)) + return false; + std::string test_name; + if (!xml_reader.NodeAttribute("name", &test_name)) + return false; + result.full_name = test_case_name + "." + test_name; + + std::string test_time_str; + if (!xml_reader.NodeAttribute("time", &test_time_str)) + return false; + result.elapsed_time = TimeDelta::FromMicroseconds( + static_cast<int64_t>(strtod(test_time_str.c_str(), nullptr) * + Time::kMicrosecondsPerSecond)); + + result.status = TestResult::TEST_SUCCESS; + + if (!results->empty() && + results->back().full_name == result.full_name && + results->back().status == TestResult::TEST_CRASH) { + // Erase the fail-safe "crashed" result - now we know the test did + // not crash. 
+ results->pop_back(); + } + + results->push_back(result); + } else if (node_name == "failure" && !xml_reader.IsClosingElement()) { + std::string failure_message; + if (!xml_reader.NodeAttribute("message", &failure_message)) + return false; + + DCHECK(!results->empty()); + results->back().status = TestResult::TEST_FAILURE; + + state = STATE_FAILURE; + } else if (node_name == "testcase" && xml_reader.IsClosingElement()) { + // Deliberately empty. + } else if (node_name == "x-test-result-part" && + !xml_reader.IsClosingElement()) { + std::string result_type; + if (!xml_reader.NodeAttribute("type", &result_type)) + return false; + + std::string file_name; + if (!xml_reader.NodeAttribute("file", &file_name)) + return false; + + std::string line_number_str; + if (!xml_reader.NodeAttribute("line", &line_number_str)) + return false; + + int line_number; + if (!StringToInt(line_number_str, &line_number)) + return false; + + TestResultPart::Type type; + if (!TestResultPart::TypeFromString(result_type, &type)) + return false; + + TestResultPart test_result_part; + test_result_part.type = type; + test_result_part.file_name = file_name, + test_result_part.line_number = line_number; + DCHECK(!results->empty()); + results->back().test_result_parts.push_back(test_result_part); + + state = STATE_TEST_RESULT; + } else { + return false; + } + break; + case STATE_TEST_RESULT: + if (node_name == "summary" && !xml_reader.IsClosingElement()) { + std::string summary; + if (!xml_reader.ReadElementContent(&summary)) + return false; + + if (!Base64Decode(summary, &summary)) + return false; + + DCHECK(!results->empty()); + DCHECK(!results->back().test_result_parts.empty()); + results->back().test_result_parts.back().summary = summary; + } else if (node_name == "summary" && xml_reader.IsClosingElement()) { + } else if (node_name == "message" && !xml_reader.IsClosingElement()) { + std::string message; + if (!xml_reader.ReadElementContent(&message)) + return false; + + if (!Base64Decode(message, &message)) + return false; + + DCHECK(!results->empty()); + DCHECK(!results->back().test_result_parts.empty()); + results->back().test_result_parts.back().message = message; + } else if (node_name == "message" && xml_reader.IsClosingElement()) { + } else if (node_name == "x-test-result-part" && + xml_reader.IsClosingElement()) { + state = STATE_TESTCASE; + } else { + return false; + } + break; + case STATE_FAILURE: + if (node_name == "failure" && xml_reader.IsClosingElement()) + state = STATE_TESTCASE; + else + return false; + break; + case STATE_END: + // If we are here and there are still XML elements, the file has wrong + // format. + return false; + } + } + + *crashed = (state != STATE_END); + return true; +} + +} // namespace base diff --git a/chromium/base/test/gtest_xml_util.h b/chromium/base/test/gtest_xml_util.h new file mode 100644 index 00000000000..b023f80da18 --- /dev/null +++ b/chromium/base/test/gtest_xml_util.h @@ -0,0 +1,27 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_GTEST_XML_UTIL_H_ +#define BASE_TEST_GTEST_XML_UTIL_H_ + +#include <vector> + +#include "base/compiler_specific.h" + +namespace base { + +class FilePath; +struct TestResult; + +// Produces a vector of test results based on GTest output file. +// Returns true iff the output file exists and has been successfully parsed. 
+// On successful return |crashed| is set to true if the test results +// are valid but incomplete. +bool ProcessGTestOutput(const base::FilePath& output_file, + std::vector<TestResult>* results, + bool* crashed) WARN_UNUSED_RESULT; + +} // namespace base + +#endif // BASE_TEST_GTEST_XML_UTIL_H_ diff --git a/chromium/base/test/icu_test_util.cc b/chromium/base/test/icu_test_util.cc new file mode 100644 index 00000000000..c15a6df9349 --- /dev/null +++ b/chromium/base/test/icu_test_util.cc @@ -0,0 +1,49 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/icu_test_util.h" + +#include "base/base_switches.h" +#include "base/command_line.h" +#include "base/i18n/icu_util.h" +#include "base/i18n/rtl.h" +#include "third_party/icu/source/common/unicode/uloc.h" +#include "third_party/icu/source/i18n/unicode/timezone.h" + +namespace base { +namespace test { + +ScopedRestoreICUDefaultLocale::ScopedRestoreICUDefaultLocale() + : ScopedRestoreICUDefaultLocale(std::string()) {} + +ScopedRestoreICUDefaultLocale::ScopedRestoreICUDefaultLocale( + const std::string& locale) + : default_locale_(uloc_getDefault()) { + if (!locale.empty()) + i18n::SetICUDefaultLocale(locale.data()); +} + +ScopedRestoreICUDefaultLocale::~ScopedRestoreICUDefaultLocale() { + i18n::SetICUDefaultLocale(default_locale_.data()); +} + +ScopedRestoreDefaultTimezone::ScopedRestoreDefaultTimezone(const char* zoneid) { + original_zone_.reset(icu::TimeZone::createDefault()); + icu::TimeZone::adoptDefault(icu::TimeZone::createTimeZone(zoneid)); +} + +ScopedRestoreDefaultTimezone::~ScopedRestoreDefaultTimezone() { + icu::TimeZone::adoptDefault(original_zone_.release()); +} + +void InitializeICUForTesting() { + if (!CommandLine::ForCurrentProcess()->HasSwitch( + switches::kTestDoNotInitializeIcu)) { + i18n::AllowMultipleInitializeCallsForTesting(); + i18n::InitializeICU(); + } +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/icu_test_util.h b/chromium/base/test/icu_test_util.h new file mode 100644 index 00000000000..91f44ffbd36 --- /dev/null +++ b/chromium/base/test/icu_test_util.h @@ -0,0 +1,59 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_ICU_TEST_UTIL_H_ +#define BASE_TEST_ICU_TEST_UTIL_H_ + +#include <memory> +#include <string> + +#include "base/macros.h" +#include "third_party/icu/source/common/unicode/uversion.h" + +U_NAMESPACE_BEGIN +class TimeZone; +U_NAMESPACE_END + +namespace base { +namespace test { + +// In unit tests, prefer ScopedRestoreICUDefaultLocale over +// calling base::i18n::SetICUDefaultLocale() directly. This scoper makes it +// harder to accidentally forget to reset the locale. +class ScopedRestoreICUDefaultLocale { + public: + ScopedRestoreICUDefaultLocale(); + explicit ScopedRestoreICUDefaultLocale(const std::string& locale); + ~ScopedRestoreICUDefaultLocale(); + + private: + const std::string default_locale_; + + ScopedRestoreICUDefaultLocale(const ScopedRestoreICUDefaultLocale&) = delete; + ScopedRestoreICUDefaultLocale& operator=( + const ScopedRestoreICUDefaultLocale&) = delete; +}; + +// In unit tests, prefer ScopedRestoreDefaultTimezone over +// calling icu::TimeZone::adoptDefault() directly. This scoper makes it +// harder to accidentally forget to reset the locale. 
+class ScopedRestoreDefaultTimezone { + public: + ScopedRestoreDefaultTimezone(const char* zoneid); + ~ScopedRestoreDefaultTimezone(); + + ScopedRestoreDefaultTimezone(const ScopedRestoreDefaultTimezone&) = delete; + ScopedRestoreDefaultTimezone& operator=(const ScopedRestoreDefaultTimezone&) = + delete; + + private: + std::unique_ptr<icu::TimeZone> original_zone_; +}; + +void InitializeICUForTesting(); + +} // namespace test +} // namespace base + +#endif // BASE_TEST_ICU_TEST_UTIL_H_ diff --git a/chromium/base/test/immediate_crash_test_helper.cc b/chromium/base/test/immediate_crash_test_helper.cc new file mode 100644 index 00000000000..676a2ba6cdb --- /dev/null +++ b/chromium/base/test/immediate_crash_test_helper.cc @@ -0,0 +1,32 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/immediate_crash.h" // nogncheck + +#if defined(WIN32) +#define IMMEDIATE_CRASH_TEST_HELPER_EXPORT __declspec(dllexport) +#else // defined(WIN32) +#define IMMEDIATE_CRASH_TEST_HELPER_EXPORT \ + __attribute__((visibility("default"))) +#endif // defined(WIN32) + +extern "C" { + +IMMEDIATE_CRASH_TEST_HELPER_EXPORT int TestFunction1(int x, int y) { + if (x < 1) + IMMEDIATE_CRASH(); + if (y < 1) + IMMEDIATE_CRASH(); + return x + y; +} + +IMMEDIATE_CRASH_TEST_HELPER_EXPORT int TestFunction2(int x, int y) { + if (x < 2) + IMMEDIATE_CRASH(); + if (y < 2) + IMMEDIATE_CRASH(); + return x * y; +} + +} // extern "C" diff --git a/chromium/base/test/malloc_wrapper.cc b/chromium/base/test/malloc_wrapper.cc new file mode 100644 index 00000000000..eb280a3eeea --- /dev/null +++ b/chromium/base/test/malloc_wrapper.cc @@ -0,0 +1,11 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "malloc_wrapper.h" + +#include <stdlib.h> + +void* MallocWrapper(size_t size) { + return malloc(size); +} diff --git a/chromium/base/test/malloc_wrapper.h b/chromium/base/test/malloc_wrapper.h new file mode 100644 index 00000000000..e15ea48dc83 --- /dev/null +++ b/chromium/base/test/malloc_wrapper.h @@ -0,0 +1,22 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_MALLOC_WRAPPER_H_ +#define BASE_TEST_MALLOC_WRAPPER_H_ + +#include <stddef.h> + +// BASE_EXPORT depends on COMPONENT_BUILD. +// This will always be a separate shared library, so don't use BASE_EXPORT here. +#if defined(WIN32) +#define MALLOC_WRAPPER_EXPORT __declspec(dllexport) +#else +#define MALLOC_WRAPPER_EXPORT __attribute__((visibility("default"))) +#endif // defined(WIN32) + +// Calls malloc directly. Defined as a C function so that the function can be +// easily referenced by dlsym() without complications from C++ name mangling. +extern "C" MALLOC_WRAPPER_EXPORT void* MallocWrapper(size_t size); + +#endif // BASE_TEST_MALLOC_WRAPPER_H_ diff --git a/chromium/base/test/mock_callback.h b/chromium/base/test/mock_callback.h new file mode 100644 index 00000000000..24ad8a3b346 --- /dev/null +++ b/chromium/base/test/mock_callback.h @@ -0,0 +1,386 @@ +// This file was GENERATED by command: +// pump.py mock_callback.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2017 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Analogous to GMock's built-in MockFunction, but for base::Callback instead of +// std::function. It takes the full callback type as a parameter, so that it can +// support both OnceCallback and RepeatingCallback. Furthermore, this file +// defines convenience typedefs in the form of MockOnceCallback<Signature>, +// MockRepeatingCallback<Signature>, MockOnceClosure and MockRepeatingClosure. +// +// Use: +// using FooCallback = base::RepeatingCallback<int(std::string)>; +// +// TEST(FooTest, RunsCallbackWithBarArgument) { +// base::MockCallback<FooCallback> callback; +// EXPECT_CALL(callback, Run("bar")).WillOnce(Return(1)); +// Foo(callback.Get()); +// } +// +// Or equivalently: +// +// TEST(FooTest, RunsCallbackWithBarArgument) { +// base::MockRepeatingCallback<int(std::string)> callback; +// EXPECT_CALL(callback, Run("bar")).WillOnce(Return(1)); +// Foo(callback.Get()); +// } +// +// +// Can be used with StrictMock and NiceMock. Caller must ensure that it outlives +// any base::Callback obtained from it. + +#ifndef BASE_TEST_MOCK_CALLBACK_H_ +#define BASE_TEST_MOCK_CALLBACK_H_ + +#include "base/bind.h" +#include "base/callback.h" +#include "base/macros.h" +#include "testing/gmock/include/gmock/gmock.h" + +namespace base { + +// clang-format off + +template <typename F> +class MockCallback; + +template <typename Signature> +using MockOnceCallback = MockCallback<OnceCallback<Signature>>; +template <typename Signature> +using MockRepeatingCallback = MockCallback<RepeatingCallback<Signature>>; + +using MockOnceClosure = MockCallback<OnceClosure>; +using MockRepeatingClosure = MockCallback<RepeatingClosure>; + +template <typename R> +class MockCallback<RepeatingCallback<R()>> { + public: + MockCallback() = default; + MOCK_METHOD0_T(Run, R()); + + RepeatingCallback<R()> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R> +class MockCallback<OnceCallback<R()>> { + public: + MockCallback() = default; + MOCK_METHOD0_T(Run, R()); + + OnceCallback<R()> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1> +class MockCallback<RepeatingCallback<R(A1)>> { + public: + MockCallback() = default; + MOCK_METHOD1_T(Run, R(A1)); + + RepeatingCallback<R(A1)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1> +class MockCallback<OnceCallback<R(A1)>> { + public: + MockCallback() = default; + MOCK_METHOD1_T(Run, R(A1)); + + OnceCallback<R(A1)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2> +class MockCallback<RepeatingCallback<R(A1, A2)>> { + public: + MockCallback() = default; + MOCK_METHOD2_T(Run, R(A1, A2)); + + RepeatingCallback<R(A1, A2)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2> +class MockCallback<OnceCallback<R(A1, A2)>> { + public: + MockCallback() = default; + MOCK_METHOD2_T(Run, R(A1, A2)); + + OnceCallback<R(A1, A2)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + 
private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3> +class MockCallback<RepeatingCallback<R(A1, A2, A3)>> { + public: + MockCallback() = default; + MOCK_METHOD3_T(Run, R(A1, A2, A3)); + + RepeatingCallback<R(A1, A2, A3)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3> +class MockCallback<OnceCallback<R(A1, A2, A3)>> { + public: + MockCallback() = default; + MOCK_METHOD3_T(Run, R(A1, A2, A3)); + + OnceCallback<R(A1, A2, A3)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4)>> { + public: + MockCallback() = default; + MOCK_METHOD4_T(Run, R(A1, A2, A3, A4)); + + RepeatingCallback<R(A1, A2, A3, A4)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4> +class MockCallback<OnceCallback<R(A1, A2, A3, A4)>> { + public: + MockCallback() = default; + MOCK_METHOD4_T(Run, R(A1, A2, A3, A4)); + + OnceCallback<R(A1, A2, A3, A4)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4, A5)>> { + public: + MockCallback() = default; + MOCK_METHOD5_T(Run, R(A1, A2, A3, A4, A5)); + + RepeatingCallback<R(A1, A2, A3, A4, A5)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5> +class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5)>> { + public: + MockCallback() = default; + MOCK_METHOD5_T(Run, R(A1, A2, A3, A4, A5)); + + OnceCallback<R(A1, A2, A3, A4, A5)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4, A5, A6)>> { + public: + MockCallback() = default; + MOCK_METHOD6_T(Run, R(A1, A2, A3, A4, A5, A6)); + + RepeatingCallback<R(A1, A2, A3, A4, A5, A6)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6> +class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6)>> { + public: + MockCallback() = default; + MOCK_METHOD6_T(Run, R(A1, A2, A3, A4, A5, A6)); + + OnceCallback<R(A1, A2, A3, A4, A5, A6)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7)>> { + public: + MockCallback() = default; + MOCK_METHOD7_T(Run, R(A1, A2, A3, A4, A5, A6, A7)); + + RepeatingCallback<R(A1, A2, A3, A4, 
A5, A6, A7)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7> +class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7)>> { + public: + MockCallback() = default; + MOCK_METHOD7_T(Run, R(A1, A2, A3, A4, A5, A6, A7)); + + OnceCallback<R(A1, A2, A3, A4, A5, A6, A7)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7, typename A8> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7, A8)>> { + public: + MockCallback() = default; + MOCK_METHOD8_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8)); + + RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7, A8)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7, typename A8> +class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8)>> { + public: + MockCallback() = default; + MOCK_METHOD8_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8)); + + OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7, typename A8, typename A9> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)>> { + public: + MockCallback() = default; + MOCK_METHOD9_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9)); + + RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7, typename A8, typename A9> +class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)>> { + public: + MockCallback() = default; + MOCK_METHOD9_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9)); + + OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7, typename A8, typename A9, + typename A10> +class MockCallback<RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, + A10)>> { + public: + MockCallback() = default; + MOCK_METHOD10_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)); + + RepeatingCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R, typename A1, typename A2, typename A3, typename A4, + typename A5, typename A6, typename A7, typename A8, typename A9, + typename A10> +class MockCallback<OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)>> { + public: + MockCallback() = default; + MOCK_METHOD10_T(Run, R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)); + + OnceCallback<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> 
 Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +// clang-format on + +} // namespace base + +#endif // BASE_TEST_MOCK_CALLBACK_H_ diff --git a/chromium/base/test/mock_callback.h.pump b/chromium/base/test/mock_callback.h.pump new file mode 100644 index 00000000000..59155276fd0 --- /dev/null +++ b/chromium/base/test/mock_callback.h.pump @@ -0,0 +1,104 @@ +$$ This is a pump file for generating file templates. Pump is a Python +$$ script that is part of the Google Test suite of utilities. Description +$$ can be found here: +$$ +$$ https://github.com/google/googletest/blob/master/googletest/docs/PumpManual.md +$$ +$$ MAX_ARITY controls the number of arguments that MockCallback supports. +$$ It is chosen to match the number GMock supports. +$var MAX_ARITY = 10 +$$ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Analogous to GMock's built-in MockFunction, but for base::Callback instead of +// std::function. It takes the full callback type as a parameter, so that it can +// support both OnceCallback and RepeatingCallback. Furthermore, this file +// defines convenience typedefs in the form of MockOnceCallback<Signature>, +// MockRepeatingCallback<Signature>, MockOnceClosure and MockRepeatingClosure. +// +// Use: +// using FooCallback = base::RepeatingCallback<int(std::string)>; +// +// TEST(FooTest, RunsCallbackWithBarArgument) { +// base::MockCallback<FooCallback> callback; +// EXPECT_CALL(callback, Run("bar")).WillOnce(Return(1)); +// Foo(callback.Get()); +// } +// +// Or equivalently: +// +// TEST(FooTest, RunsCallbackWithBarArgument) { +// base::MockRepeatingCallback<int(std::string)> callback; +// EXPECT_CALL(callback, Run("bar")).WillOnce(Return(1)); +// Foo(callback.Get()); +// } +// +// +// Can be used with StrictMock and NiceMock. Caller must ensure that it outlives +// any base::Callback obtained from it.
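(As a hedged illustration of the StrictMock/NiceMock compatibility mentioned in the comment above, and not code taken from this patch, a test might wrap one of these mocks as follows; FooTest and the void(bool) signature are invented for the example.)

  #include "base/test/mock_callback.h"
  #include "testing/gmock/include/gmock/gmock.h"
  #include "testing/gtest/include/gtest/gtest.h"

  TEST(FooTest, RunsCallbackExactlyOnce) {
    // StrictMock turns any Run() call that has no matching EXPECT_CALL into
    // a test failure, which is a natural fit for once-callbacks.
    testing::StrictMock<base::MockOnceCallback<void(bool)>> callback;
    EXPECT_CALL(callback, Run(true));
    callback.Get().Run(true);
  }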
+ +#ifndef BASE_TEST_MOCK_CALLBACK_H_ +#define BASE_TEST_MOCK_CALLBACK_H_ + +#include "base/bind.h" +#include "base/callback.h" +#include "base/macros.h" +#include "testing/gmock/include/gmock/gmock.h" + +namespace base { + +// clang-format off + +template <typename F> +class MockCallback; + +template <typename Signature> +using MockOnceCallback = MockCallback<OnceCallback<Signature>>; +template <typename Signature> +using MockRepeatingCallback = MockCallback<RepeatingCallback<Signature>>; + +using MockOnceClosure = MockCallback<OnceClosure>; +using MockRepeatingClosure = MockCallback<RepeatingClosure>; + +$range i 0..MAX_ARITY +$for i [[ +$range j 1..i +$var run_type = [[R($for j, [[A$j]])]] + +template <typename R$for j [[, typename A$j]]> +class MockCallback<RepeatingCallback<$run_type>> { + public: + MockCallback() = default; + MOCK_METHOD$(i)_T(Run, $run_type); + + RepeatingCallback<$run_type> Get() { + return BindRepeating(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +template <typename R$for j [[, typename A$j]]> +class MockCallback<OnceCallback<$run_type>> { + public: + MockCallback() = default; + MOCK_METHOD$(i)_T(Run, $run_type); + + OnceCallback<$run_type> Get() { + return BindOnce(&MockCallback::Run, Unretained(this)); + } + + private: + DISALLOW_COPY_AND_ASSIGN(MockCallback); +}; + +]] + +// clang-format on + +} // namespace base + +#endif // BASE_TEST_MOCK_CALLBACK_H_ diff --git a/chromium/base/test/mock_callback_unittest.cc b/chromium/base/test/mock_callback_unittest.cc new file mode 100644 index 00000000000..efab2823934 --- /dev/null +++ b/chromium/base/test/mock_callback_unittest.cc @@ -0,0 +1,81 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/test/mock_callback.h" + +#include "base/callback.h" +#include "testing/gmock/include/gmock/gmock.h" + +using testing::InSequence; +using testing::Return; + +namespace base { +namespace { + +TEST(MockCallbackTest, ZeroArgs) { + MockCallback<RepeatingClosure> mock_closure; + EXPECT_CALL(mock_closure, Run()); + mock_closure.Get().Run(); + + MockCallback<RepeatingCallback<int()>> mock_int_callback; + { + InSequence sequence; + EXPECT_CALL(mock_int_callback, Run()).WillOnce(Return(42)); + EXPECT_CALL(mock_int_callback, Run()).WillOnce(Return(88)); + } + EXPECT_EQ(42, mock_int_callback.Get().Run()); + EXPECT_EQ(88, mock_int_callback.Get().Run()); +} + +TEST(MockCallbackTest, WithArgs) { + MockCallback<RepeatingCallback<int(int, int)>> mock_two_int_callback; + EXPECT_CALL(mock_two_int_callback, Run(1, 2)).WillOnce(Return(42)); + EXPECT_CALL(mock_two_int_callback, Run(0, 0)).WillRepeatedly(Return(-1)); + RepeatingCallback<int(int, int)> two_int_callback = + mock_two_int_callback.Get(); + EXPECT_EQ(-1, two_int_callback.Run(0, 0)); + EXPECT_EQ(42, two_int_callback.Run(1, 2)); + EXPECT_EQ(-1, two_int_callback.Run(0, 0)); +} + +TEST(MockCallbackTest, ZeroArgsOnce) { + MockCallback<OnceClosure> mock_closure; + EXPECT_CALL(mock_closure, Run()); + mock_closure.Get().Run(); + + MockCallback<OnceCallback<int()>> mock_int_callback; + EXPECT_CALL(mock_int_callback, Run()).WillOnce(Return(88)); + EXPECT_EQ(88, mock_int_callback.Get().Run()); +} + +TEST(MockCallbackTest, WithArgsOnce) { + MockCallback<OnceCallback<int(int, int)>> mock_two_int_callback; + EXPECT_CALL(mock_two_int_callback, Run(1, 2)).WillOnce(Return(42)); + OnceCallback<int(int, int)> two_int_callback = mock_two_int_callback.Get(); + EXPECT_EQ(42, std::move(two_int_callback).Run(1, 2)); +} + +TEST(MockCallbackTest, Typedefs) { + static_assert(std::is_same<MockCallback<RepeatingCallback<int()>>, + MockRepeatingCallback<int()>>::value, + "Repeating typedef differs for zero args"); + static_assert(std::is_same<MockCallback<RepeatingCallback<int(int, int)>>, + MockRepeatingCallback<int(int, int)>>::value, + "Repeating typedef differs for multiple args"); + static_assert(std::is_same<MockCallback<RepeatingCallback<void()>>, + MockRepeatingClosure>::value, + "Repeating typedef differs for closure"); + static_assert(std::is_same<MockCallback<OnceCallback<int()>>, + MockOnceCallback<int()>>::value, + "Once typedef differs for zero args"); + static_assert(std::is_same<MockCallback<OnceCallback<int(int, int)>>, + MockOnceCallback<int(int, int)>>::value, + "Once typedef differs for multiple args"); + static_assert(std::is_same<MockCallback<OnceCallback<void()>>, + MockOnceClosure>::value, + "Once typedef differs for closure"); +} + +} // namespace +} // namespace base diff --git a/chromium/base/test/mock_chrome_application_mac.mm b/chromium/base/test/mock_chrome_application_mac.mm new file mode 100644 index 00000000000..2695bc88d4e --- /dev/null +++ b/chromium/base/test/mock_chrome_application_mac.mm @@ -0,0 +1,49 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +#include "base/test/mock_chrome_application_mac.h" + +#include "base/auto_reset.h" +#include "base/check.h" + +@implementation MockCrApp + ++ (NSApplication*)sharedApplication { + NSApplication* app = [super sharedApplication]; + DCHECK([app conformsToProtocol:@protocol(CrAppControlProtocol)]) + << "Existing NSApp (class " << [[app className] UTF8String] + << ") does not conform to required protocol."; + DCHECK(base::MessagePumpMac::UsingCrApp()) + << "MessagePumpMac::Create() was called before " + << "+[MockCrApp sharedApplication]"; + return app; +} + +- (void)sendEvent:(NSEvent*)event { + base::AutoReset<BOOL> scoper(&_handlingSendEvent, YES); + [super sendEvent:event]; +} + +- (void)setHandlingSendEvent:(BOOL)handlingSendEvent { + _handlingSendEvent = handlingSendEvent; +} + +- (BOOL)isHandlingSendEvent { + return _handlingSendEvent; +} + +@end + +namespace mock_cr_app { + +void RegisterMockCrApp() { + [MockCrApp sharedApplication]; + + // If there was an invocation to NSApp prior to this method, then the NSApp + // will not be a MockCrApp, but will instead be an NSApplication. + // This is undesirable and we must enforce that this doesn't happen. + CHECK([NSApp isKindOfClass:[MockCrApp class]]); +} + +} // namespace mock_cr_app diff --git a/chromium/base/test/mock_devices_changed_observer.cc b/chromium/base/test/mock_devices_changed_observer.cc new file mode 100644 index 00000000000..9fc57cd93e7 --- /dev/null +++ b/chromium/base/test/mock_devices_changed_observer.cc @@ -0,0 +1,13 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/mock_devices_changed_observer.h" + +namespace base { + +MockDevicesChangedObserver::MockDevicesChangedObserver() = default; + +MockDevicesChangedObserver::~MockDevicesChangedObserver() = default; + +} // namespace base diff --git a/chromium/base/test/mock_devices_changed_observer.h b/chromium/base/test/mock_devices_changed_observer.h new file mode 100644 index 00000000000..42b8c3f9f24 --- /dev/null +++ b/chromium/base/test/mock_devices_changed_observer.h @@ -0,0 +1,31 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_MOCK_DEVICES_CHANGED_OBSERVER_H_ +#define BASE_TEST_MOCK_DEVICES_CHANGED_OBSERVER_H_ + +#include <string> + +#include "base/macros.h" +#include "base/system/system_monitor.h" +#include "testing/gmock/include/gmock/gmock.h" + +namespace base { + +class MockDevicesChangedObserver + : public base::SystemMonitor::DevicesChangedObserver { + public: + MockDevicesChangedObserver(); + ~MockDevicesChangedObserver() override; + + MOCK_METHOD1(OnDevicesChanged, + void(base::SystemMonitor::DeviceType device_type)); + + private: + DISALLOW_COPY_AND_ASSIGN(MockDevicesChangedObserver); +}; + +} // namespace base + +#endif // BASE_TEST_MOCK_DEVICES_CHANGED_OBSERVER_H_ diff --git a/chromium/base/test/mock_entropy_provider.cc b/chromium/base/test/mock_entropy_provider.cc new file mode 100644 index 00000000000..f3fd2a481ea --- /dev/null +++ b/chromium/base/test/mock_entropy_provider.cc @@ -0,0 +1,20 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
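(A possible usage sketch for the MockDevicesChangedObserver declared above, assuming base::SystemMonitor's AddDevicesChangedObserver/ProcessDevicesChanged API and asynchronous observer dispatch; the test name and DEVTYPE_UNKNOWN choice are illustrative only, not taken from this patch.)

  #include "base/run_loop.h"
  #include "base/system/system_monitor.h"
  #include "base/test/mock_devices_changed_observer.h"
  #include "base/test/task_environment.h"
  #include "testing/gtest/include/gtest/gtest.h"

  TEST(DevicesChangedTest, NotifiesObserver) {
    base::test::SingleThreadTaskEnvironment task_environment;
    base::SystemMonitor system_monitor;
    base::MockDevicesChangedObserver observer;
    system_monitor.AddDevicesChangedObserver(&observer);
    EXPECT_CALL(observer,
                OnDevicesChanged(base::SystemMonitor::DEVTYPE_UNKNOWN));
    system_monitor.ProcessDevicesChanged(base::SystemMonitor::DEVTYPE_UNKNOWN);
    // Device-change notifications are dispatched to observers asynchronously,
    // so drain pending tasks before the expectation is verified.
    base::RunLoop().RunUntilIdle();
    system_monitor.RemoveDevicesChangedObserver(&observer);
  }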
+ +#include "base/test/mock_entropy_provider.h" + +namespace base { + +MockEntropyProvider::MockEntropyProvider() : entropy_value_(0.5) {} +MockEntropyProvider::MockEntropyProvider(double entropy_value) + : entropy_value_(entropy_value) {} +MockEntropyProvider::~MockEntropyProvider() = default; + +double MockEntropyProvider::GetEntropyForTrial( + const std::string& trial_name, + uint32_t randomization_seed) const { + return entropy_value_; +} + +} // namespace base diff --git a/chromium/base/test/mock_entropy_provider.h b/chromium/base/test/mock_entropy_provider.h new file mode 100644 index 00000000000..ca2b4bc8fe0 --- /dev/null +++ b/chromium/base/test/mock_entropy_provider.h @@ -0,0 +1,32 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_MOCK_ENTROPY_PROVIDER_H_ +#define BASE_TEST_MOCK_ENTROPY_PROVIDER_H_ + +#include <stdint.h> + +#include "base/metrics/field_trial.h" + +namespace base { + +class MockEntropyProvider : public base::FieldTrial::EntropyProvider { + public: + MockEntropyProvider(); + explicit MockEntropyProvider(double entropy_value); + ~MockEntropyProvider() override; + + // base::FieldTrial::EntropyProvider: + double GetEntropyForTrial(const std::string& trial_name, + uint32_t randomization_seed) const override; + + private: + double entropy_value_; + + DISALLOW_COPY_AND_ASSIGN(MockEntropyProvider); +}; + +} // namespace base + +#endif // BASE_TEST_MOCK_ENTROPY_PROVIDER_H_ diff --git a/chromium/base/test/mock_log.cc b/chromium/base/test/mock_log.cc new file mode 100644 index 00000000000..a09000d8ed7 --- /dev/null +++ b/chromium/base/test/mock_log.cc @@ -0,0 +1,68 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/mock_log.h" + +namespace base { +namespace test { + +// static +MockLog* MockLog::g_instance_ = nullptr; +Lock MockLog::g_lock; + +MockLog::MockLog() : is_capturing_logs_(false) { +} + +MockLog::~MockLog() { + if (is_capturing_logs_) { + StopCapturingLogs(); + } +} + +void MockLog::StartCapturingLogs() { + AutoLock scoped_lock(g_lock); + + // We don't use CHECK(), which can generate a new LOG message, and + // thus can confuse MockLog objects or other registered + // LogSinks. + RAW_CHECK(!is_capturing_logs_); + RAW_CHECK(!g_instance_); + + is_capturing_logs_ = true; + g_instance_ = this; + previous_handler_ = logging::GetLogMessageHandler(); + logging::SetLogMessageHandler(LogMessageHandler); +} + +void MockLog::StopCapturingLogs() { + AutoLock scoped_lock(g_lock); + + // We don't use CHECK(), which can generate a new LOG message, and + // thus can confuse MockLog objects or other registered + // LogSinks. + RAW_CHECK(is_capturing_logs_); + RAW_CHECK(g_instance_ == this); + + is_capturing_logs_ = false; + logging::SetLogMessageHandler(previous_handler_); + g_instance_ = nullptr; +} + +// static +bool MockLog::LogMessageHandler(int severity, + const char* file, + int line, + size_t message_start, + const std::string& str) { + // gMock guarantees thread-safety for calling a mocked method + // (https://github.com/google/googlemock/blob/master/googlemock/docs/CookBook.md#using-google-mock-and-threads) + // but we also need to make sure that Start/StopCapturingLogs are synchronized + // with LogMessageHandler. 
+ AutoLock scoped_lock(g_lock); + + return g_instance_->Log(severity, file, line, message_start, str); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/mock_log.h b/chromium/base/test/mock_log.h new file mode 100644 index 00000000000..cda2fcd6259 --- /dev/null +++ b/chromium/base/test/mock_log.h @@ -0,0 +1,100 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_MOCK_LOG_H_ +#define BASE_TEST_MOCK_LOG_H_ + +#include <stddef.h> + +#include <string> + +#include "base/logging.h" +#include "base/macros.h" +#include "base/synchronization/lock.h" +#include "testing/gmock/include/gmock/gmock.h" + +namespace base { +namespace test { + +// A MockLog object intercepts LOG() messages issued during its lifespan. Using +// this together with gMock, it's very easy to test how a piece of code calls +// LOG(). The typical usage: +// +// TEST(FooTest, LogsCorrectly) { +// MockLog log; +// +// // We expect the WARNING "Something bad!" exactly twice. +// EXPECT_CALL(log, Log(WARNING, _, "Something bad!")) +// .Times(2); +// +// // We allow foo.cc to call LOG(INFO) any number of times. +// EXPECT_CALL(log, Log(INFO, HasSubstr("/foo.cc"), _)) +// .Times(AnyNumber()); +// +// log.StartCapturingLogs(); // Call this after done setting expectations. +// Foo(); // Exercises the code under test. +// } +// +// CAVEAT: base/logging does not allow a thread to call LOG() again when it's +// already inside a LOG() call. Doing so will cause a deadlock. Therefore, +// it's the user's responsibility to not call LOG() in an action triggered by +// MockLog::Log(). You may call RAW_LOG() instead. +class MockLog { + public: + // Creates a MockLog object that is not capturing logs. If it were to start + // to capture logs, it could be a problem if some other threads already exist + // and are logging, as the user hasn't had a chance to set up expectation on + // this object yet (calling a mock method before setting the expectation is + // UNDEFINED behavior). + MockLog(); + + // When the object is destructed, it stops intercepting logs. + ~MockLog(); + + // Starts log capturing if the object isn't already doing so. + // Otherwise crashes. + void StartCapturingLogs(); + + // Stops log capturing if the object is capturing logs. Otherwise crashes. + void StopCapturingLogs(); + + // Log method is invoked for every log message before it's sent to other log + // destinations (if any). The method should return true to signal that it + // handled the message and the message should not be sent to other log + // destinations. + MOCK_METHOD5(Log, + bool(int severity, + const char* file, + int line, + size_t message_start, + const std::string& str)); + + private: + // The currently active mock log. + static MockLog* g_instance_; + + // Lock protecting access to g_instance_. + static Lock g_lock; + + // Static function which is set as the logging message handler. + // Called once for each message. + static bool LogMessageHandler(int severity, + const char* file, + int line, + size_t message_start, + const std::string& str); + + // True if this object is currently capturing logs. + bool is_capturing_logs_; + + // The previous handler to restore when the MockLog is destroyed. 
+ logging::LogMessageHandlerFunction previous_handler_; + + DISALLOW_COPY_AND_ASSIGN(MockLog); +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_MOCK_LOG_H_ diff --git a/chromium/base/test/move_only_int.h b/chromium/base/test/move_only_int.h new file mode 100644 index 00000000000..6e909836240 --- /dev/null +++ b/chromium/base/test/move_only_int.h @@ -0,0 +1,68 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_MOVE_ONLY_INT_H_ +#define BASE_TEST_MOVE_ONLY_INT_H_ + +#include "base/macros.h" + +namespace base { + +// A move-only class that holds an integer. This is designed for testing +// containers. See also CopyOnlyInt. +class MoveOnlyInt { + public: + explicit MoveOnlyInt(int data = 1) : data_(data) {} + MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; } + ~MoveOnlyInt() { data_ = 0; } + + MoveOnlyInt& operator=(MoveOnlyInt&& other) { + data_ = other.data_; + other.data_ = 0; + return *this; + } + + friend bool operator==(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return lhs.data_ == rhs.data_; + } + + friend bool operator!=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !operator==(lhs, rhs); + } + + friend bool operator<(const MoveOnlyInt& lhs, int rhs) { + return lhs.data_ < rhs; + } + + friend bool operator<(int lhs, const MoveOnlyInt& rhs) { + return lhs < rhs.data_; + } + + friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return lhs.data_ < rhs.data_; + } + + friend bool operator>(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !(lhs < rhs); + } + + int data() const { return data_; } + + private: + volatile int data_; + + DISALLOW_COPY_AND_ASSIGN(MoveOnlyInt); +}; + +} // namespace base + +#endif // BASE_TEST_MOVE_ONLY_INT_H_ diff --git a/chromium/base/test/multiprocess_test.cc b/chromium/base/test/multiprocess_test.cc new file mode 100644 index 00000000000..46556f75732 --- /dev/null +++ b/chromium/base/test/multiprocess_test.cc @@ -0,0 +1,74 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/multiprocess_test.h" + +#include "base/base_switches.h" +#include "base/command_line.h" +#include "base/files/file_path.h" +#include "base/files/file_util.h" +#include "base/threading/thread_restrictions.h" +#include "build/build_config.h" + +namespace base { + +#if !defined(OS_ANDROID) +Process SpawnMultiProcessTestChild(const std::string& procname, + const CommandLine& base_command_line, + const LaunchOptions& options) { + CommandLine command_line(base_command_line); + // TODO(viettrungluu): See comment above |MakeCmdLine()| in the header file. + // This is a temporary hack, since |MakeCmdLine()| has to provide a full + // command line. 
+ if (!command_line.HasSwitch(switches::kTestChildProcess)) + command_line.AppendSwitchASCII(switches::kTestChildProcess, procname); + + return LaunchProcess(command_line, options); +} + +bool WaitForMultiprocessTestChildExit(const Process& process, + TimeDelta timeout, + int* exit_code) { + return process.WaitForExitWithTimeout(timeout, exit_code); +} + +bool TerminateMultiProcessTestChild(const Process& process, + int exit_code, + bool wait) { + return process.Terminate(exit_code, wait); +} + +#endif // !defined(OS_ANDROID) + +CommandLine GetMultiProcessTestChildBaseCommandLine() { + base::ScopedAllowBlockingForTesting allow_blocking; + CommandLine cmd_line = *CommandLine::ForCurrentProcess(); + cmd_line.SetProgram(MakeAbsoluteFilePath(cmd_line.GetProgram())); + return cmd_line; +} + +// MultiProcessTest ------------------------------------------------------------ + +MultiProcessTest::MultiProcessTest() = default; + +Process MultiProcessTest::SpawnChild(const std::string& procname) { + LaunchOptions options; +#if defined(OS_WIN) + options.start_hidden = true; +#endif + return SpawnChildWithOptions(procname, options); +} + +Process MultiProcessTest::SpawnChildWithOptions(const std::string& procname, + const LaunchOptions& options) { + return SpawnMultiProcessTestChild(procname, MakeCmdLine(procname), options); +} + +CommandLine MultiProcessTest::MakeCmdLine(const std::string& procname) { + CommandLine command_line = GetMultiProcessTestChildBaseCommandLine(); + command_line.AppendSwitchASCII(switches::kTestChildProcess, procname); + return command_line; +} + +} // namespace base diff --git a/chromium/base/test/multiprocess_test.h b/chromium/base/test/multiprocess_test.h new file mode 100644 index 00000000000..52c35e02ef2 --- /dev/null +++ b/chromium/base/test/multiprocess_test.h @@ -0,0 +1,151 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_MULTIPROCESS_TEST_H_ +#define BASE_TEST_MULTIPROCESS_TEST_H_ + +#include <string> + +#include "base/macros.h" +#include "base/process/launch.h" +#include "base/process/process.h" +#include "build/build_config.h" +#include "testing/platform_test.h" + +namespace base { + +class CommandLine; + +// Helpers to spawn a child for a multiprocess test and execute a designated +// function. Use these when you already have another base class for your test +// fixture, but you want (some) of your tests to be multiprocess (otherwise you +// may just want to derive your fixture from |MultiProcessTest|, below). +// +// Use these helpers as follows: +// +// TEST_F(MyTest, ATest) { +// CommandLine command_line( +// base::GetMultiProcessTestChildBaseCommandLine()); +// // Maybe add our own switches to |command_line|.... +// +// LaunchOptions options; +// // Maybe set some options (e.g., |start_hidden| on Windows).... +// +// // Start a child process and run |a_test_func|. +// base::Process test_child_process = +// base::SpawnMultiProcessTestChild("a_test_func", command_line, +// options); +// +// // Do stuff involving |test_child_process| and the child process.... +// +// int rv = -1; +// ASSERT_TRUE(base::WaitForMultiprocessTestChildExit(test_child_process, +// TestTimeouts::action_timeout(), &rv)); +// EXPECT_EQ(0, rv); +// } +// +// // Note: |MULTIPROCESS_TEST_MAIN()| is defined in +// // testing/multiprocess_func_list.h. +// MULTIPROCESS_TEST_MAIN(a_test_func) { +// // Code here runs in a child process.... 
+// return 0; +// } +// +// If you need to terminate the child process, use the +// TerminateMultiProcessTestChild method to ensure that test will work on +// Android. + +// Spawns a child process and executes the function |procname| declared using +// |MULTIPROCESS_TEST_MAIN()| or |MULTIPROCESS_TEST_MAIN_WITH_SETUP()|. +// |command_line| should be as provided by +// |GetMultiProcessTestChildBaseCommandLine()| (below), possibly with arguments +// added. Note: On Windows, you probably want to set |options.start_hidden|. +Process SpawnMultiProcessTestChild(const std::string& procname, + const CommandLine& command_line, + const LaunchOptions& options); + +// Gets the base command line for |SpawnMultiProcessTestChild()|. To this, you +// may add any flags needed for your child process. +CommandLine GetMultiProcessTestChildBaseCommandLine(); + +// Waits for the child process to exit. Returns true if the process exited +// within |timeout| and sets |exit_code| if non null. +bool WaitForMultiprocessTestChildExit(const Process& process, + TimeDelta timeout, + int* exit_code); + +// Terminates |process| with |exit_code|. If |wait| is true, this call blocks +// until the process actually terminates. +bool TerminateMultiProcessTestChild(const Process& process, + int exit_code, + bool wait); + +#if defined(OS_ANDROID) +// Returns whether the child process exited cleanly from the main runloop. +bool MultiProcessTestChildHasCleanExit(const Process& process); +#endif + +// MultiProcessTest ------------------------------------------------------------ + +// A MultiProcessTest is a test class which makes it easier to +// write a test which requires code running out of process. +// +// To create a multiprocess test simply follow these steps: +// +// 1) Derive your test from MultiProcessTest. Example: +// +// class MyTest : public MultiProcessTest { +// }; +// +// TEST_F(MyTest, TestCaseName) { +// ... +// } +// +// 2) Create a mainline function for the child processes and include +// testing/multiprocess_func_list.h. +// See the declaration of the MULTIPROCESS_TEST_MAIN macro +// in that file for an example. +// 3) Call SpawnChild("foo"), where "foo" is the name of +// the function you wish to run in the child processes. +// That's it! +class MultiProcessTest : public PlatformTest { + public: + MultiProcessTest(); + + protected: + // Run a child process. + // 'procname' is the name of a function which the child will + // execute. It must be exported from this library in order to + // run. + // + // Example signature: + // extern "C" int __declspec(dllexport) FooBar() { + // // do client work here + // } + // + // Returns the child process. + Process SpawnChild(const std::string& procname); + + // Run a child process using the given launch options. + // + // Note: On Windows, you probably want to set |options.start_hidden|. + Process SpawnChildWithOptions(const std::string& procname, + const LaunchOptions& options); + + // Set up the command line used to spawn the child process. + // Override this to add things to the command line (calling this first in the + // override). + // Note that currently some tests rely on this providing a full command line, + // which they then use directly with |LaunchProcess()|. + // TODO(viettrungluu): Remove this and add a virtual + // |ModifyChildCommandLine()|; make the two divergent uses more sane. 
+ virtual CommandLine MakeCmdLine(const std::string& procname); + + private: + DISALLOW_COPY_AND_ASSIGN(MultiProcessTest); +}; + +} // namespace base + +#endif // BASE_TEST_MULTIPROCESS_TEST_H_ diff --git a/chromium/base/test/multiprocess_test_android.cc b/chromium/base/test/multiprocess_test_android.cc new file mode 100644 index 00000000000..f3e3ea34bca --- /dev/null +++ b/chromium/base/test/multiprocess_test_android.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/multiprocess_test.h" + +#include <string.h> +#include <vector> + +#include "base/android/jni_android.h" +#include "base/android/jni_array.h" +#include "base/android/scoped_java_ref.h" +#include "base/base_switches.h" +#include "base/check.h" +#include "base/command_line.h" +#include "base/test/test_support_jni_headers/MainReturnCodeResult_jni.h" +#include "base/test/test_support_jni_headers/MultiprocessTestClientLauncher_jni.h" + +namespace base { + +// A very basic implementation for Android. On Android tests can run in an APK +// and we don't have an executable to exec*. This implementation does the bare +// minimum to execute the method specified by procname (in the child process). +// - All options except |fds_to_remap| are ignored. +// +// NOTE: This MUST NOT run on the main thread of the NativeTest application. +Process SpawnMultiProcessTestChild(const std::string& procname, + const CommandLine& base_command_line, + const LaunchOptions& options) { + JNIEnv* env = android::AttachCurrentThread(); + DCHECK(env); + + std::vector<int> fd_keys; + std::vector<int> fd_fds; + for (auto& iter : options.fds_to_remap) { + fd_keys.push_back(iter.second); + fd_fds.push_back(iter.first); + } + + android::ScopedJavaLocalRef<jobjectArray> fds = + android::Java_MultiprocessTestClientLauncher_makeFdInfoArray( + env, base::android::ToJavaIntArray(env, fd_keys), + base::android::ToJavaIntArray(env, fd_fds)); + + CommandLine command_line(base_command_line); + if (!command_line.HasSwitch(switches::kTestChildProcess)) { + command_line.AppendSwitchASCII(switches::kTestChildProcess, procname); + } + + android::ScopedJavaLocalRef<jobjectArray> j_argv = + android::ToJavaArrayOfStrings(env, command_line.argv()); + jint pid = android::Java_MultiprocessTestClientLauncher_launchClient( + env, j_argv, fds); + return Process(pid); +} + +bool WaitForMultiprocessTestChildExit(const Process& process, + TimeDelta timeout, + int* exit_code) { + JNIEnv* env = android::AttachCurrentThread(); + DCHECK(env); + + base::android::ScopedJavaLocalRef<jobject> result_code = + android::Java_MultiprocessTestClientLauncher_waitForMainToReturn( + env, process.Pid(), static_cast<int32_t>(timeout.InMilliseconds())); + if (result_code.is_null() || + Java_MainReturnCodeResult_hasTimedOut(env, result_code)) { + return false; + } + if (exit_code) { + *exit_code = Java_MainReturnCodeResult_getReturnCode(env, result_code); + } + return true; +} + +bool TerminateMultiProcessTestChild(const Process& process, + int exit_code, + bool wait) { + JNIEnv* env = android::AttachCurrentThread(); + DCHECK(env); + + return android::Java_MultiprocessTestClientLauncher_terminate( + env, process.Pid(), exit_code, wait); +} + +bool MultiProcessTestChildHasCleanExit(const Process& process) { + JNIEnv* env = android::AttachCurrentThread(); + DCHECK(env); + + return android::Java_MultiprocessTestClientLauncher_hasCleanExit( + env, 
process.Pid()); +} + +} // namespace base diff --git a/chromium/base/test/native_library_test_utils.cc b/chromium/base/test/native_library_test_utils.cc new file mode 100644 index 00000000000..adcb1b01e90 --- /dev/null +++ b/chromium/base/test/native_library_test_utils.cc @@ -0,0 +1,19 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/native_library_test_utils.h" + +namespace { + +int g_static_value = 0; + +} // namespace + +extern "C" { + +int g_native_library_exported_value = 0; + +int NativeLibraryTestIncrement() { return ++g_static_value; } + +} // extern "C" diff --git a/chromium/base/test/native_library_test_utils.h b/chromium/base/test/native_library_test_utils.h new file mode 100644 index 00000000000..e26fd1a04e6 --- /dev/null +++ b/chromium/base/test/native_library_test_utils.h @@ -0,0 +1,26 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_NATIVE_LIBRARY_TEST_UTILS_H_ +#define BASE_TEST_NATIVE_LIBRARY_TEST_UTILS_H_ + +#include "build/build_config.h" + +#if defined(OS_WIN) +#define NATIVE_LIBRARY_TEST_ALWAYS_EXPORT __declspec(dllexport) +#else +#define NATIVE_LIBRARY_TEST_ALWAYS_EXPORT __attribute__((visibility("default"))) +#endif + +extern "C" { + +extern NATIVE_LIBRARY_TEST_ALWAYS_EXPORT int g_native_library_exported_value; + +// A function which increments an internal counter value and returns its value. +// The first call returns 1, then 2, etc. +NATIVE_LIBRARY_TEST_ALWAYS_EXPORT int NativeLibraryTestIncrement(); + +} // extern "C" + +#endif // BASE_TEST_NATIVE_LIBRARY_TEST_UTILS_H_ diff --git a/chromium/base/test/null_task_runner.cc b/chromium/base/test/null_task_runner.cc new file mode 100644 index 00000000000..dfa26fa313c --- /dev/null +++ b/chromium/base/test/null_task_runner.cc @@ -0,0 +1,29 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/null_task_runner.h" + +namespace base { + +NullTaskRunner::NullTaskRunner() = default; + +NullTaskRunner::~NullTaskRunner() = default; + +bool NullTaskRunner::PostDelayedTask(const Location& from_here, + OnceClosure task, + base::TimeDelta delay) { + return false; +} + +bool NullTaskRunner::PostNonNestableDelayedTask(const Location& from_here, + OnceClosure task, + base::TimeDelta delay) { + return false; +} + +bool NullTaskRunner::RunsTasksInCurrentSequence() const { + return true; +} + +} // namespace base diff --git a/chromium/base/test/null_task_runner.h b/chromium/base/test/null_task_runner.h new file mode 100644 index 00000000000..8ed339526d0 --- /dev/null +++ b/chromium/base/test/null_task_runner.h @@ -0,0 +1,44 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_NULL_TASK_RUNNER_H_ +#define BASE_TEST_NULL_TASK_RUNNER_H_ + +#include "base/callback.h" +#include "base/compiler_specific.h" +#include "base/macros.h" +#include "base/single_thread_task_runner.h" + +namespace base { + +// ATTENTION: Prefer SingleThreadTaskEnvironment or TaskEnvironment w/ +// ThreadPoolExecutionMode::QUEUED over this class. 
A NullTaskRunner might seem +// appealing, but not running tasks is under-testing the potential side-effects +// of the code under tests. All tests should be okay if tasks born from their +// actions are run or deleted at a later point. +// +// Helper class for tests that need to provide an implementation of a +// *TaskRunner class but don't actually care about tasks being run. +class NullTaskRunner : public base::SingleThreadTaskRunner { + public: + NullTaskRunner(); + + bool PostDelayedTask(const Location& from_here, + base::OnceClosure task, + base::TimeDelta delay) override; + bool PostNonNestableDelayedTask(const Location& from_here, + base::OnceClosure task, + base::TimeDelta delay) override; + // Always returns true to avoid triggering DCHECKs. + bool RunsTasksInCurrentSequence() const override; + + protected: + ~NullTaskRunner() override; + + DISALLOW_COPY_AND_ASSIGN(NullTaskRunner); +}; + +} // namespace base + +#endif // BASE_TEST_NULL_TASK_RUNNER_H_ diff --git a/chromium/base/test/perf_log.cc b/chromium/base/test/perf_log.cc new file mode 100644 index 00000000000..e275ac0922f --- /dev/null +++ b/chromium/base/test/perf_log.cc @@ -0,0 +1,45 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/perf_log.h" + +#include "base/files/file_util.h" +#include "base/notreached.h" + +namespace base { + +static FILE* perf_log_file = nullptr; + +bool InitPerfLog(const FilePath& log_file) { + if (perf_log_file) { + // trying to initialize twice + NOTREACHED(); + return false; + } + + perf_log_file = OpenFile(log_file, "w"); + return perf_log_file != nullptr; +} + +void FinalizePerfLog() { + if (!perf_log_file) { + // trying to cleanup without initializing + NOTREACHED(); + return; + } + base::CloseFile(perf_log_file); +} + +void LogPerfResult(const char* test_name, double value, const char* units) { + if (!perf_log_file) { + NOTREACHED(); + return; + } + + fprintf(perf_log_file, "%s\t%g\t%s\n", test_name, value, units); + printf("%s\t%g\t%s\n", test_name, value, units); + fflush(stdout); +} + +} // namespace base diff --git a/chromium/base/test/perf_log.h b/chromium/base/test/perf_log.h new file mode 100644 index 00000000000..5d6ed9f8ba4 --- /dev/null +++ b/chromium/base/test/perf_log.h @@ -0,0 +1,24 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_PERF_LOG_H_ +#define BASE_TEST_PERF_LOG_H_ + +namespace base { + +class FilePath; + +// Initializes and finalizes the perf log. These functions should be +// called at the beginning and end (respectively) of running all the +// performance tests. The init function returns true on success. +bool InitPerfLog(const FilePath& log_path); +void FinalizePerfLog(); + +// Writes to the perf result log the given 'value' resulting from the +// named 'test'. The units are to aid in reading the log by people. +void LogPerfResult(const char* test_name, double value, const char* units); + +} // namespace base + +#endif // BASE_TEST_PERF_LOG_H_ diff --git a/chromium/base/test/perf_test_suite.cc b/chromium/base/test/perf_test_suite.cc new file mode 100644 index 00000000000..2e2cdbb751e --- /dev/null +++ b/chromium/base/test/perf_test_suite.cc @@ -0,0 +1,50 @@ +// Copyright (c) 2010 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/perf_test_suite.h" + +#include "base/command_line.h" +#include "base/debug/debugger.h" +#include "base/files/file_path.h" +#include "base/path_service.h" +#include "base/process/launch.h" +#include "base/strings/string_util.h" +#include "base/test/perf_log.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +PerfTestSuite::PerfTestSuite(int argc, char** argv) : TestSuite(argc, argv) {} + +void PerfTestSuite::Initialize() { + TestSuite::Initialize(); + + // Initialize the perf timer log + FilePath log_path = + CommandLine::ForCurrentProcess()->GetSwitchValuePath("log-file"); + if (log_path.empty()) { + PathService::Get(FILE_EXE, &log_path); +#if defined(OS_ANDROID) || defined(OS_FUCHSIA) + base::FilePath tmp_dir; + PathService::Get(base::DIR_CACHE, &tmp_dir); + log_path = tmp_dir.Append(log_path.BaseName()); +#endif + log_path = log_path.ReplaceExtension(FILE_PATH_LITERAL("log")); + log_path = log_path.InsertBeforeExtension(FILE_PATH_LITERAL("_perf")); + } + ASSERT_TRUE(InitPerfLog(log_path)); + + // Raise to high priority to have more precise measurements. Since we don't + // aim at 1% precision, it is not necessary to run at realtime level. + if (!debug::BeingDebugged()) + RaiseProcessToHighPriority(); +} + +void PerfTestSuite::Shutdown() { + TestSuite::Shutdown(); + FinalizePerfLog(); +} + +} // namespace base diff --git a/chromium/base/test/perf_test_suite.h b/chromium/base/test/perf_test_suite.h new file mode 100644 index 00000000000..52528f0ac45 --- /dev/null +++ b/chromium/base/test/perf_test_suite.h @@ -0,0 +1,22 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_PERF_TEST_SUITE_H_ +#define BASE_TEST_PERF_TEST_SUITE_H_ + +#include "base/test/test_suite.h" + +namespace base { + +class PerfTestSuite : public TestSuite { + public: + PerfTestSuite(int argc, char** argv); + + void Initialize() override; + void Shutdown() override; +}; + +} // namespace base + +#endif // BASE_TEST_PERF_TEST_SUITE_H_ diff --git a/chromium/base/test/perf_time_logger.cc b/chromium/base/test/perf_time_logger.cc new file mode 100644 index 00000000000..c05ba51b7d8 --- /dev/null +++ b/chromium/base/test/perf_time_logger.cc @@ -0,0 +1,27 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/perf_time_logger.h" + +#include "base/test/perf_log.h" + +namespace base { + +PerfTimeLogger::PerfTimeLogger(const char* test_name) + : logged_(false), test_name_(test_name) {} + +PerfTimeLogger::~PerfTimeLogger() { + if (!logged_) + Done(); +} + +void PerfTimeLogger::Done() { + // we use a floating-point millisecond value because it is more + // intuitive than microseconds and we want more precision than + // integer milliseconds + LogPerfResult(test_name_.c_str(), timer_.Elapsed().InMillisecondsF(), "ms"); + logged_ = true; +} + +} // namespace base diff --git a/chromium/base/test/perf_time_logger.h b/chromium/base/test/perf_time_logger.h new file mode 100644 index 00000000000..a5f3e8a70c3 --- /dev/null +++ b/chromium/base/test/perf_time_logger.h @@ -0,0 +1,37 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_PERF_TIME_LOGGER_H_ +#define BASE_TEST_PERF_TIME_LOGGER_H_ + +#include <string> + +#include "base/macros.h" +#include "base/timer/elapsed_timer.h" + +namespace base { + +// Automates calling LogPerfResult for the common case where you want +// to measure the time that something took. Call Done() when the test +// is complete if you do extra work after the test or there are stack +// objects with potentially expensive constructors. Otherwise, this +// class will automatically log on destruction. +class PerfTimeLogger { + public: + explicit PerfTimeLogger(const char* test_name); + ~PerfTimeLogger(); + + void Done(); + + private: + bool logged_; + std::string test_name_; + ElapsedTimer timer_; + + DISALLOW_COPY_AND_ASSIGN(PerfTimeLogger); +}; + +} // namespace base + +#endif // BASE_TEST_PERF_TIME_LOGGER_H_ diff --git a/chromium/base/test/power_monitor_test_base.cc b/chromium/base/test/power_monitor_test_base.cc new file mode 100644 index 00000000000..f37fb579688 --- /dev/null +++ b/chromium/base/test/power_monitor_test_base.cc @@ -0,0 +1,65 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/power_monitor_test_base.h" + +#include "base/message_loop/message_loop_current.h" +#include "base/power_monitor/power_monitor.h" +#include "base/power_monitor/power_monitor_source.h" +#include "base/run_loop.h" + +namespace base { + +PowerMonitorTestSource::PowerMonitorTestSource() + : test_on_battery_power_(false) { + DCHECK(MessageLoopCurrent::Get()) + << "PowerMonitorTestSource requires a MessageLoop."; +} + +PowerMonitorTestSource::~PowerMonitorTestSource() = default; + +void PowerMonitorTestSource::GeneratePowerStateEvent(bool on_battery_power) { + test_on_battery_power_ = on_battery_power; + ProcessPowerEvent(POWER_STATE_EVENT); + RunLoop().RunUntilIdle(); +} + +void PowerMonitorTestSource::GenerateSuspendEvent() { + ProcessPowerEvent(SUSPEND_EVENT); + RunLoop().RunUntilIdle(); +} + +void PowerMonitorTestSource::GenerateResumeEvent() { + ProcessPowerEvent(RESUME_EVENT); + RunLoop().RunUntilIdle(); +} + +bool PowerMonitorTestSource::IsOnBatteryPowerImpl() { + return test_on_battery_power_; +} + +PowerMonitorTestObserver::PowerMonitorTestObserver() + : last_power_state_(false), + power_state_changes_(0), + suspends_(0), + resumes_(0) { +} + +PowerMonitorTestObserver::~PowerMonitorTestObserver() = default; + +// PowerObserver callbacks. +void PowerMonitorTestObserver::OnPowerStateChange(bool on_battery_power) { + last_power_state_ = on_battery_power; + power_state_changes_++; +} + +void PowerMonitorTestObserver::OnSuspend() { + suspends_++; +} + +void PowerMonitorTestObserver::OnResume() { + resumes_++; +} + +} // namespace base diff --git a/chromium/base/test/power_monitor_test_base.h b/chromium/base/test/power_monitor_test_base.h new file mode 100644 index 00000000000..3086bb87496 --- /dev/null +++ b/chromium/base/test/power_monitor_test_base.h @@ -0,0 +1,53 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
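(A brief usage sketch for the PerfTimeLogger defined above, assuming the perf log has already been initialized, for example by PerfTestSuite; the measurement name is illustrative and not part of this patch.)

  #include "base/test/perf_time_logger.h"

  void MeasureSomething() {
    // Records "my_operation\t<elapsed ms>\tms" via LogPerfResult either when
    // Done() is called or, if Done() is omitted, when the logger is destroyed.
    base::PerfTimeLogger timer("my_operation");
    // ... code being timed ...
    timer.Done();
  }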
+ +#ifndef BASE_TEST_POWER_MONITOR_TEST_BASE_H_ +#define BASE_TEST_POWER_MONITOR_TEST_BASE_H_ + +#include "base/power_monitor/power_monitor.h" +#include "base/power_monitor/power_monitor_source.h" + +namespace base { + +class PowerMonitorTestSource : public PowerMonitorSource { + public: + PowerMonitorTestSource(); + ~PowerMonitorTestSource() override; + + void GeneratePowerStateEvent(bool on_battery_power); + void GenerateSuspendEvent(); + void GenerateResumeEvent(); + + protected: + bool IsOnBatteryPowerImpl() override; + + bool test_on_battery_power_; +}; + +class PowerMonitorTestObserver : public PowerObserver { + public: + PowerMonitorTestObserver(); + ~PowerMonitorTestObserver() override; + + // PowerObserver callbacks. + void OnPowerStateChange(bool on_battery_power) override; + void OnSuspend() override; + void OnResume() override; + + // Test status counts. + bool last_power_state() const { return last_power_state_; } + int power_state_changes() const { return power_state_changes_; } + int suspends() const { return suspends_; } + int resumes() const { return resumes_; } + + private: + bool last_power_state_; // Last power state we were notified of. + int power_state_changes_; // Count of OnPowerStateChange notifications. + int suspends_; // Count of OnSuspend notifications. + int resumes_; // Count of OnResume notifications. +}; + +} // namespace base + +#endif // BASE_TEST_POWER_MONITOR_TEST_BASE_H_ diff --git a/chromium/base/test/reached_code_profiler_android.cc b/chromium/base/test/reached_code_profiler_android.cc new file mode 100644 index 00000000000..cfedc6b37a8 --- /dev/null +++ b/chromium/base/test/reached_code_profiler_android.cc @@ -0,0 +1,25 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/android/jni_android.h" +#include "base/android/reached_code_profiler.h" +#include "base/test/test_support_jni_headers/ReachedCodeProfiler_jni.h" + +// This file provides functions to query the state of the reached code profiler +// from Java. It's used only for tests. +namespace base { +namespace android { + +static jboolean JNI_ReachedCodeProfiler_IsReachedCodeProfilerEnabled( + JNIEnv* env) { + return IsReachedCodeProfilerEnabled(); +} + +static jboolean JNI_ReachedCodeProfiler_IsReachedCodeProfilerSupported( + JNIEnv* env) { + return IsReachedCodeProfilerSupported(); +} + +} // namespace android +} // namespace base diff --git a/chromium/base/test/run_all_base_unittests.cc b/chromium/base/test/run_all_base_unittests.cc new file mode 100644 index 00000000000..aa7a9bf5bb0 --- /dev/null +++ b/chromium/base/test/run_all_base_unittests.cc @@ -0,0 +1,15 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/bind.h" +#include "base/test/launcher/unit_test_launcher.h" +#include "base/test/test_suite.h" +#include "build/build_config.h" + +int main(int argc, char** argv) { + base::TestSuite test_suite(argc, argv); + return base::LaunchUnitTests( + argc, argv, + base::BindOnce(&base::TestSuite::Run, base::Unretained(&test_suite))); +} diff --git a/chromium/base/test/run_all_perftests.cc b/chromium/base/test/run_all_perftests.cc new file mode 100644 index 00000000000..6e38109376a --- /dev/null +++ b/chromium/base/test/run_all_perftests.cc @@ -0,0 +1,9 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/perf_test_suite.h" + +int main(int argc, char** argv) { + return base::PerfTestSuite(argc, argv).Run(); +} diff --git a/chromium/base/test/run_all_unittests.cc b/chromium/base/test/run_all_unittests.cc new file mode 100644 index 00000000000..0ad84ed53d2 --- /dev/null +++ b/chromium/base/test/run_all_unittests.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/bind.h" +#include "base/test/launcher/unit_test_launcher.h" +#include "base/test/test_suite.h" +#include "build/build_config.h" + +int main(int argc, char** argv) { + base::TestSuite test_suite(argc, argv); + return base::LaunchUnitTests( + argc, argv, + base::BindOnce(&base::TestSuite::Run, base::Unretained(&test_suite))); +} diff --git a/chromium/base/test/scoped_command_line.cc b/chromium/base/test/scoped_command_line.cc new file mode 100644 index 00000000000..c74d243f448 --- /dev/null +++ b/chromium/base/test/scoped_command_line.cc @@ -0,0 +1,22 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_command_line.h" + +namespace base { +namespace test { + +ScopedCommandLine::ScopedCommandLine() + : original_command_line_(*base::CommandLine::ForCurrentProcess()) {} + +ScopedCommandLine::~ScopedCommandLine() { + *base::CommandLine::ForCurrentProcess() = original_command_line_; +} + +CommandLine* ScopedCommandLine::GetProcessCommandLine() { + return base::CommandLine::ForCurrentProcess(); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/scoped_command_line.h b/chromium/base/test/scoped_command_line.h new file mode 100644 index 00000000000..dea0c6ac1e6 --- /dev/null +++ b/chromium/base/test/scoped_command_line.h @@ -0,0 +1,34 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_COMMAND_LINE_H_ +#define BASE_TEST_SCOPED_COMMAND_LINE_H_ + +#include "base/command_line.h" + +namespace base { +namespace test { + +// Helper class to restore the original command line at the end of the scope. +// NOTE: In most unit tests, the command line is automatically restored per +// test, so this class is not necessary if the command line applies to +// the entire single test. +class ScopedCommandLine final { + public: + ScopedCommandLine(); + ~ScopedCommandLine(); + + // Gets the command line for the current process. + // NOTE: Do not name this GetCommandLine as this will conflict with Windows's + // GetCommandLine and get renamed to GetCommandLineW. + CommandLine* GetProcessCommandLine(); + + private: + const CommandLine original_command_line_; +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_SCOPED_COMMAND_LINE_H_ diff --git a/chromium/base/test/scoped_environment_variable_override.cc b/chromium/base/test/scoped_environment_variable_override.cc new file mode 100644 index 00000000000..4b7b3871415 --- /dev/null +++ b/chromium/base/test/scoped_environment_variable_override.cc @@ -0,0 +1,33 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_environment_variable_override.h" + +#include "base/environment.h" + +namespace base { +namespace test { + +ScopedEnvironmentVariableOverride::ScopedEnvironmentVariableOverride( + const std::string& variable_name, + const std::string& value) + : environment_(Environment::Create()), + variable_name_(variable_name), + overridden_(false), + was_set_(false) { + was_set_ = environment_->GetVar(variable_name, &old_value_); + overridden_ = environment_->SetVar(variable_name, value); +} + +ScopedEnvironmentVariableOverride::~ScopedEnvironmentVariableOverride() { + if (overridden_) { + if (was_set_) + environment_->SetVar(variable_name_, old_value_); + else + environment_->UnSetVar(variable_name_); + } +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/scoped_environment_variable_override.h b/chromium/base/test/scoped_environment_variable_override.h new file mode 100644 index 00000000000..b05b5f9a405 --- /dev/null +++ b/chromium/base/test/scoped_environment_variable_override.h @@ -0,0 +1,40 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_ENVIRONMENT_VARIABLE_OVERRIDE_H_ +#define BASE_TEST_SCOPED_ENVIRONMENT_VARIABLE_OVERRIDE_H_ + +#include <memory> +#include <string> + +namespace base { + +class Environment; + +namespace test { + +// Helper class to override |variable_name| environment variable to |value| for +// the lifetime of this class. Upon destruction, the previous value is restored. +class ScopedEnvironmentVariableOverride final { + public: + ScopedEnvironmentVariableOverride(const std::string& variable_name, + const std::string& value); + ~ScopedEnvironmentVariableOverride(); + + base::Environment* GetEnv() { return environment_.get(); } + bool IsOverridden() { return overridden_; } + bool WasSet() { return was_set_; } + + private: + std::unique_ptr<Environment> environment_; + std::string variable_name_; + bool overridden_; + bool was_set_; + std::string old_value_; +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_SCOPED_ENVIRONMENT_VARIABLE_OVERRIDE_H_ diff --git a/chromium/base/test/scoped_feature_list.cc b/chromium/base/test/scoped_feature_list.cc new file mode 100644 index 00000000000..0e5afdb2e36 --- /dev/null +++ b/chromium/base/test/scoped_feature_list.cc @@ -0,0 +1,301 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
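// Illustrative usage of the ScopedCommandLine and
// ScopedEnvironmentVariableOverride helpers introduced above (a minimal
// sketch, not part of this change; the switch, variable, and test names are
// hypothetical):
//
//   #include "base/test/scoped_command_line.h"
//   #include "base/test/scoped_environment_variable_override.h"
//
//   TEST(MyConfigTest, ReadsSwitchAndEnvironment) {
//     base::test::ScopedCommandLine scoped_command_line;
//     scoped_command_line.GetProcessCommandLine()->AppendSwitchASCII(
//         "my-switch", "42");
//     base::test::ScopedEnvironmentVariableOverride scoped_env("MY_VAR",
//                                                              "value");
//     // Code under test sees the modified command line and environment;
//     // both are restored when the scopers go out of scope.
//   }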
+ +#include "base/test/scoped_feature_list.h" + +#include <algorithm> +#include <utility> +#include <vector> + +#include "base/memory/ptr_util.h" +#include "base/metrics/field_trial_param_associator.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/string_split.h" +#include "base/strings/string_util.h" +#include "base/test/mock_entropy_provider.h" + +namespace base { +namespace test { + +namespace { + +constexpr char kTrialGroup[] = "scoped_feature_list_trial_group"; + +std::vector<StringPiece> GetFeatureVector( + const std::vector<Feature>& features) { + std::vector<StringPiece> output; + for (const Feature& feature : features) { + output.push_back(feature.name); + } + + return output; +} + +std::vector<StringPiece> GetFeatureVectorFromFeaturesAndParams( + const std::vector<ScopedFeatureList::FeatureAndParams>& + features_and_params) { + std::vector<StringPiece> output; + for (const auto& entry : features_and_params) { + output.push_back(entry.feature.name); + } + + return output; +} + +// Extracts a feature name from a feature state string. For example, given +// the input "*MyLovelyFeature<SomeFieldTrial", returns "MyLovelyFeature". +StringPiece GetFeatureName(StringPiece feature) { + StringPiece feature_name = feature; + + // Remove default info. + if (feature_name.starts_with("*")) + feature_name = feature_name.substr(1); + + // Remove field_trial info. + std::size_t index = feature_name.find("<"); + if (index != std::string::npos) + feature_name = feature_name.substr(0, index); + + return feature_name; +} + +struct Features { + std::vector<StringPiece> enabled_feature_list; + std::vector<StringPiece> disabled_feature_list; +}; + +// Entries in |feature_vector| come from |merged_features| in +// OverrideFeatures() and may carry a field trial linkage when they have +// parameters (i.e. they contain the '<' symbol). |feature_name| has already +// been stripped with GetFeatureName() and therefore carries no parameters. +bool ContainsFeature(const std::vector<StringPiece>& feature_vector, + StringPiece feature_name) { + auto iter = std::find_if(feature_vector.begin(), feature_vector.end(), + [&feature_name](const StringPiece& a) { + return GetFeatureName(a) == feature_name; + }); + return iter != feature_vector.end(); +} + +// Merges previously-specified feature overrides with those passed into one of +// the Init() methods. |features| should be a list of features previously +// overridden to be in the |override_state|. |merged_features| should contain +// the enabled and disabled features passed into the Init() method, plus any +// overrides merged as a result of previous calls to this function.
+void OverrideFeatures(const std::string& features, + FeatureList::OverrideState override_state, + Features* merged_features) { + std::vector<StringPiece> features_list = + SplitStringPiece(features, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY); + + for (StringPiece feature : features_list) { + StringPiece feature_name = GetFeatureName(feature); + + if (ContainsFeature(merged_features->enabled_feature_list, feature_name) || + ContainsFeature(merged_features->disabled_feature_list, feature_name)) { + continue; + } + + if (override_state == FeatureList::OverrideState::OVERRIDE_ENABLE_FEATURE) { + merged_features->enabled_feature_list.push_back(feature); + } else { + DCHECK_EQ(override_state, + FeatureList::OverrideState::OVERRIDE_DISABLE_FEATURE); + merged_features->disabled_feature_list.push_back(feature); + } + } +} + +// Hex encode params so that special characters do not break formatting. +std::string HexEncodeString(const std::string& input) { + return HexEncode(input.data(), input.size()); +} + +// Inverse of HexEncodeString(). +std::string HexDecodeString(const std::string& input) { + if (input.empty()) + return std::string(); + std::string bytes; + bool result = HexStringToString(input, &bytes); + DCHECK(result); + return bytes; +} + +} // namespace + +ScopedFeatureList::FeatureAndParams::FeatureAndParams( + const Feature& feature, + const FieldTrialParams& params) + : feature(feature), params(params) {} + +ScopedFeatureList::FeatureAndParams::~FeatureAndParams() = default; + +ScopedFeatureList::FeatureAndParams::FeatureAndParams( + const FeatureAndParams& other) = default; + +ScopedFeatureList::ScopedFeatureList() = default; + +ScopedFeatureList::~ScopedFeatureList() { + Reset(); +} + +void ScopedFeatureList::Reset() { + // If one of the Init() functions was never called, don't reset anything. + if (!init_called_) + return; + + init_called_ = false; + + FeatureList::ClearInstanceForTesting(); + + if (field_trial_list_) { + field_trial_list_.reset(); + + // Restore params to how they were before. 
+ FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting(); + AssociateFieldTrialParamsFromString(original_params_, &HexDecodeString); + + FieldTrialList::RestoreInstanceForTesting(original_field_trial_list_); + original_field_trial_list_ = nullptr; + } + if (original_feature_list_) + FeatureList::RestoreInstanceForTesting(std::move(original_feature_list_)); +} + +void ScopedFeatureList::Init() { + InitWithFeaturesImpl({}, {}, {}); +} + +void ScopedFeatureList::InitWithFeatureList( + std::unique_ptr<FeatureList> feature_list) { + DCHECK(!original_feature_list_); + original_feature_list_ = FeatureList::ClearInstanceForTesting(); + FeatureList::SetInstance(std::move(feature_list)); + init_called_ = true; +} + +void ScopedFeatureList::InitFromCommandLine( + const std::string& enable_features, + const std::string& disable_features) { + std::unique_ptr<FeatureList> feature_list(new FeatureList); + feature_list->InitializeFromCommandLine(enable_features, disable_features); + InitWithFeatureList(std::move(feature_list)); +} + +void ScopedFeatureList::InitWithFeatures( + const std::vector<Feature>& enabled_features, + const std::vector<Feature>& disabled_features) { + InitWithFeaturesImpl(enabled_features, {}, disabled_features); +} + +void ScopedFeatureList::InitAndEnableFeature(const Feature& feature) { + InitWithFeaturesImpl({feature}, {}, {}); +} + +void ScopedFeatureList::InitAndDisableFeature(const Feature& feature) { + InitWithFeaturesImpl({}, {}, {feature}); +} + +void ScopedFeatureList::InitWithFeatureState(const Feature& feature, + bool enabled) { + if (enabled) { + InitAndEnableFeature(feature); + } else { + InitAndDisableFeature(feature); + } +} + +void ScopedFeatureList::InitWithFeaturesImpl( + const std::vector<Feature>& enabled_features, + const std::vector<FeatureAndParams>& enabled_features_and_params, + const std::vector<Feature>& disabled_features) { + DCHECK(!init_called_); + DCHECK(enabled_features.empty() || enabled_features_and_params.empty()); + + Features merged_features; + if (!enabled_features_and_params.empty()) { + merged_features.enabled_feature_list = + GetFeatureVectorFromFeaturesAndParams(enabled_features_and_params); + } else { + merged_features.enabled_feature_list = GetFeatureVector(enabled_features); + } + merged_features.disabled_feature_list = GetFeatureVector(disabled_features); + + std::string current_enabled_features; + std::string current_disabled_features; + FeatureList* feature_list = FeatureList::GetInstance(); + if (feature_list) { + feature_list->GetFeatureOverrides(¤t_enabled_features, + ¤t_disabled_features); + } + + // Save off the existing field trials and params. + std::string existing_trial_state; + FieldTrialList::AllStatesToString(&existing_trial_state, true); + original_params_ = FieldTrialList::AllParamsToString(true, &HexEncodeString); + + // Back up the current field trial list, to be restored in Reset(). + original_field_trial_list_ = FieldTrialList::BackupInstanceForTesting(); + + // Create a field trial list, to which we'll add trials corresponding to the + // features that have params, before restoring the field trial state from the + // previous instance, further down in this function. + field_trial_list_ = + std::make_unique<FieldTrialList>(std::make_unique<MockEntropyProvider>()); + + // Associate override params. This needs to be done before trial state gets + // restored, as that will activate trials, locking down param association. 
+ auto* field_trial_param_associator = FieldTrialParamAssociator::GetInstance(); + std::vector<std::string> features_with_trial; + auto feature_it = merged_features.enabled_feature_list.begin(); + for (const auto& enabled_feature : enabled_features_and_params) { + const std::string feature_name = enabled_feature.feature.name; + const std::string trial_name = + "scoped_feature_list_trial_for_" + feature_name; + + scoped_refptr<FieldTrial> field_trial_override = + FieldTrialList::CreateFieldTrial(trial_name, kTrialGroup); + DCHECK(field_trial_override); + + field_trial_param_associator->ClearParamsForTesting(trial_name, + kTrialGroup); + bool success = field_trial_param_associator->AssociateFieldTrialParams( + trial_name, kTrialGroup, enabled_feature.params); + DCHECK(success); + + features_with_trial.push_back(feature_name + "<" + trial_name); + *feature_it = features_with_trial.back(); + ++feature_it; + } + // Restore other field trials. Note: We don't need to do anything for params + // here because the param associator already has the right state, which has + // been backed up via |original_params_| to be restored later. + FieldTrialList::CreateTrialsFromString(existing_trial_state, {}); + + OverrideFeatures(current_enabled_features, + FeatureList::OverrideState::OVERRIDE_ENABLE_FEATURE, + &merged_features); + OverrideFeatures(current_disabled_features, + FeatureList::OverrideState::OVERRIDE_DISABLE_FEATURE, + &merged_features); + + std::string enabled = JoinString(merged_features.enabled_feature_list, ","); + std::string disabled = JoinString(merged_features.disabled_feature_list, ","); + InitFromCommandLine(enabled, disabled); +} + +void ScopedFeatureList::InitAndEnableFeatureWithParameters( + const Feature& feature, + const FieldTrialParams& feature_parameters) { + InitWithFeaturesAndParameters({{feature, feature_parameters}}, {}); +} + +void ScopedFeatureList::InitWithFeaturesAndParameters( + const std::vector<FeatureAndParams>& enabled_features, + const std::vector<Feature>& disabled_features) { + InitWithFeaturesImpl({}, enabled_features, disabled_features); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/scoped_feature_list.h b/chromium/base/test/scoped_feature_list.h new file mode 100644 index 00000000000..1d5bc7e3304 --- /dev/null +++ b/chromium/base/test/scoped_feature_list.h @@ -0,0 +1,152 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_FEATURE_LIST_H_ +#define BASE_TEST_SCOPED_FEATURE_LIST_H_ + +#include <map> +#include <memory> +#include <string> +#include <vector> + +#include "base/feature_list.h" +#include "base/memory/ref_counted.h" +#include "base/metrics/field_trial.h" +#include "base/metrics/field_trial_params.h" + +namespace base { +namespace test { + +// ScopedFeatureList resets the global FeatureList instance to a new empty +// instance and restores the original instance upon destruction. When using the +// non-deprecated APIs, a corresponding FieldTrialList is also created. +// +// Note: Re-using the same object is not allowed. To reset the feature +// list and initialize it anew, destroy an existing scoped list and init +// a new one. +// +// If multiple instances of this class are used in a nested fashion, they +// should be destroyed in the opposite order of their Init*() methods being +// called. 
+// +// ScopedFeatureList needs to be initialized (via one of Init*() methods) +// before running code that inspects the state of features, such as in the +// constructor of the test harness. +// +// WARNING: To be clear, in multithreaded test environments (such as browser +// tests) there may be background threads using FeatureList before the test +// body is even entered. In these cases it is imperative that ScopedFeatureList +// be initialized BEFORE those threads are started, hence the recommendation to +// do initialization in the test harness's constructor. +class ScopedFeatureList final { + public: + ScopedFeatureList(); + ~ScopedFeatureList(); + + struct FeatureAndParams { + FeatureAndParams(const Feature& feature, const FieldTrialParams& params); + ~FeatureAndParams(); + + FeatureAndParams(const FeatureAndParams& other); + + const Feature& feature; + const FieldTrialParams params; + }; + + // Resets the instance to a non-initialized state. + void Reset(); + + // Initializes and registers a FeatureList instance without any additional + // enabled or disabled features. Existing state, if any, will be kept. This is + // equivalent to calling InitWithFeatures({}, {}). + void Init(); + + // WARNING: This method will reset any globally configured features to their + // default values, which can hide feature interaction bugs. Please use + // sparingly. https://crbug.com/713390 + // Initializes and registers the given FeatureList instance. + void InitWithFeatureList(std::unique_ptr<FeatureList> feature_list); + + // WARNING: This method will reset any globally configured features to their + // default values, which can hide feature interaction bugs. Please use + // sparingly. https://crbug.com/713390 + // Initializes and registers a FeatureList instance with only the given + // enabled and disabled features (comma-separated names). + void InitFromCommandLine(const std::string& enable_features, + const std::string& disable_features); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with the given enabled and disabled features. + // Any feature overrides already present in the global FeatureList will + // continue to apply, unless they conflict with the overrides passed into this + // method. This is important for testing potentially unexpected feature + // interactions. + void InitWithFeatures(const std::vector<Feature>& enabled_features, + const std::vector<Feature>& disabled_features); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with a single enabled feature. + void InitAndEnableFeature(const Feature& feature); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with a single enabled feature and associated + // field trial parameters. + // Note: this creates a scoped global field trial list if there is not + // currently one. + void InitAndEnableFeatureWithParameters( + const Feature& feature, + const FieldTrialParams& feature_parameters); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with the given enabled features and the + // specified field trial parameters, and the given disabled features. + // Note: This creates a scoped global field trial list if there is not + // currently one.
+ void InitWithFeaturesAndParameters( + const std::vector<FeatureAndParams>& enabled_features, + const std::vector<Feature>& disabled_features); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with a single disabled feature. + void InitAndDisableFeature(const Feature& feature); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with a single feature, either enabled or + // disabled depending on |enabled|. + void InitWithFeatureState(const Feature& feature, bool enabled); + + private: + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with the given enabled and disabled features. + // Any feature overrides already present in the global FeatureList will + // continue to apply, unless they conflict with the overrides passed into this + // method. + // Features to enable may be specified through either |enabled_features| or + // |enabled_features_and_params|, but not both (i.e. one of these must be + // empty). + void InitWithFeaturesImpl( + const std::vector<Feature>& enabled_features, + const std::vector<FeatureAndParams>& enabled_features_and_params, + const std::vector<Feature>& disabled_features); + + // Initializes and registers a FeatureList instance based on the present + // FeatureList and overridden with a single enabled feature and an associated + // field trial override. + // |trial| is expected to outlive the ScopedFeatureList. + void InitAndEnableFeatureWithFieldTrialOverride(const Feature& feature, + FieldTrial* trial); + + bool init_called_ = false; + std::unique_ptr<FeatureList> original_feature_list_; + base::FieldTrialList* original_field_trial_list_; + std::string original_params_; + std::unique_ptr<base::FieldTrialList> field_trial_list_; + + DISALLOW_COPY_AND_ASSIGN(ScopedFeatureList); +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_SCOPED_FEATURE_LIST_H_ diff --git a/chromium/base/test/scoped_feature_list_unittest.cc b/chromium/base/test/scoped_feature_list_unittest.cc new file mode 100644 index 00000000000..53a7bde390a --- /dev/null +++ b/chromium/base/test/scoped_feature_list_unittest.cc @@ -0,0 +1,430 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_feature_list.h" + +#include <map> +#include <string> +#include <utility> + +#include "base/metrics/field_trial.h" +#include "base/metrics/field_trial_params.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace test { + +namespace { + +const Feature kTestFeature1{"TestFeature1", FEATURE_DISABLED_BY_DEFAULT}; +const Feature kTestFeature2{"TestFeature2", FEATURE_DISABLED_BY_DEFAULT}; + +void ExpectFeatures(const std::string& enabled_features, + const std::string& disabled_features) { + FeatureList* list = FeatureList::GetInstance(); + std::string actual_enabled_features; + std::string actual_disabled_features; + + list->GetFeatureOverrides(&actual_enabled_features, + &actual_disabled_features); + + EXPECT_EQ(enabled_features, actual_enabled_features); + EXPECT_EQ(disabled_features, actual_disabled_features); +} + +} // namespace + +class ScopedFeatureListTest : public testing::Test { + public: + ScopedFeatureListTest() { + // Clear default feature list.
+ std::unique_ptr<FeatureList> feature_list(new FeatureList); + feature_list->InitializeFromCommandLine(std::string(), std::string()); + original_feature_list_ = FeatureList::ClearInstanceForTesting(); + FeatureList::SetInstance(std::move(feature_list)); + } + + ~ScopedFeatureListTest() override { + // Restore feature list. + if (original_feature_list_) { + FeatureList::ClearInstanceForTesting(); + FeatureList::RestoreInstanceForTesting(std::move(original_feature_list_)); + } + } + + private: + // Save the present FeatureList and restore it after test finish. + std::unique_ptr<FeatureList> original_feature_list_; + + DISALLOW_COPY_AND_ASSIGN(ScopedFeatureListTest); +}; + +TEST_F(ScopedFeatureListTest, BasicScoped) { + ExpectFeatures(std::string(), std::string()); + EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature1)); + { + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("TestFeature1", std::string()); + ExpectFeatures("TestFeature1", std::string()); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + } + ExpectFeatures(std::string(), std::string()); + EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature1)); +} + +TEST_F(ScopedFeatureListTest, EnableWithFeatureParameters) { + const char kParam1[] = "param_1"; + const char kParam2[] = "param_2"; + const char kValue1[] = "value_1"; + const char kValue2[] = "value_2"; + std::map<std::string, std::string> parameters; + parameters[kParam1] = kValue1; + parameters[kParam2] = kValue2; + + ExpectFeatures(std::string(), std::string()); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam1)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam2)); + FieldTrial::ActiveGroups active_groups; + FieldTrialList::GetActiveFieldTrialGroups(&active_groups); + EXPECT_EQ(0u, active_groups.size()); + + { + test::ScopedFeatureList feature_list; + + feature_list.InitAndEnableFeatureWithParameters(kTestFeature1, parameters); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_EQ(kValue1, + GetFieldTrialParamValueByFeature(kTestFeature1, kParam1)); + EXPECT_EQ(kValue2, + GetFieldTrialParamValueByFeature(kTestFeature1, kParam2)); + active_groups.clear(); + FieldTrialList::GetActiveFieldTrialGroups(&active_groups); + EXPECT_EQ(1u, active_groups.size()); + } + + ExpectFeatures(std::string(), std::string()); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam1)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam2)); + active_groups.clear(); + FieldTrialList::GetActiveFieldTrialGroups(&active_groups); + EXPECT_EQ(0u, active_groups.size()); +} + +TEST_F(ScopedFeatureListTest, OverrideWithFeatureParameters) { + scoped_refptr<FieldTrial> trial = + FieldTrialList::CreateFieldTrial("foo", "bar"); + const char kParam[] = "param_1"; + const char kValue[] = "value_1"; + std::map<std::string, std::string> parameters; + parameters[kParam] = kValue; + + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("TestFeature1<foo,TestFeature2", + std::string()); + + // Check initial state. 
+ ExpectFeatures("TestFeature1<foo,TestFeature2", std::string()); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + + { + // Override feature with existing field trial. + test::ScopedFeatureList feature_list2; + + feature_list2.InitAndEnableFeatureWithParameters(kTestFeature1, parameters); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + EXPECT_NE(trial.get(), FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_NE(nullptr, FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + } + + // Check that initial state is restored. + ExpectFeatures("TestFeature1<foo,TestFeature2", std::string()); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + + { + // Override feature with no existing field trial. + test::ScopedFeatureList feature_list2; + + feature_list2.InitAndEnableFeatureWithParameters(kTestFeature2, parameters); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + EXPECT_EQ(trial.get()->trial_name(), + FeatureList::GetFieldTrial(kTestFeature1)->trial_name()); + EXPECT_EQ(trial.get()->group_name(), + FeatureList::GetFieldTrial(kTestFeature1)->group_name()); + EXPECT_NE(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + } + + // Check that initial state is restored. + ExpectFeatures("TestFeature1<foo,TestFeature2", std::string()); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(trial.get(), FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); +} + +TEST_F(ScopedFeatureListTest, OverrideMultipleFeaturesWithParameters) { + scoped_refptr<FieldTrial> trial1 = + FieldTrialList::CreateFieldTrial("foo1", "bar1"); + const char kParam[] = "param_1"; + const char kValue1[] = "value_1"; + const char kValue2[] = "value_2"; + std::map<std::string, std::string> parameters1; + parameters1[kParam] = kValue1; + std::map<std::string, std::string> parameters2; + parameters2[kParam] = kValue2; + + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("TestFeature1<foo1,TestFeature2", + std::string()); + + // Check initial state. 
+ ExpectFeatures("TestFeature1<foo1,TestFeature2", std::string()); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(trial1.get(), FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + + { + // Override multiple features with parameters. + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeaturesAndParameters( + {{kTestFeature1, parameters1}, {kTestFeature2, parameters2}}, {}); + + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(kValue1, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ(kValue2, GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + } + + { + // Override a feature with a parameter and disable another one. + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeaturesAndParameters({{kTestFeature1, parameters2}}, + {kTestFeature2}); + + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(kValue2, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + } + + // Check that initial state is restored. + ExpectFeatures("TestFeature1<foo1,TestFeature2", std::string()); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_TRUE(FeatureList::IsEnabled(kTestFeature2)); + EXPECT_EQ(trial1.get(), FeatureList::GetFieldTrial(kTestFeature1)); + EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kTestFeature2)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + EXPECT_EQ("", GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); +} + +TEST_F(ScopedFeatureListTest, ParamsWithSpecialCharsPreserved) { + // Check that special characters in param names and values are preserved. 
+ const char kParam[] = ";_\\<:>/_!?"; + const char kValue[] = ",;:/'!?"; + FieldTrialParams params0 = {{kParam, kValue}}; + + test::ScopedFeatureList feature_list0; + feature_list0.InitWithFeaturesAndParameters({{kTestFeature1, params0}}, {}); + EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + + { + const char kValue1[] = "normal"; + FieldTrialParams params1 = {{kParam, kValue1}}; + test::ScopedFeatureList feature_list1; + feature_list1.InitWithFeaturesAndParameters({{kTestFeature1, params1}}, {}); + + EXPECT_EQ(kValue1, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + } + EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + + { + const char kValue2[] = "[<(2)>]"; + FieldTrialParams params2 = {{kParam, kValue2}}; + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeaturesAndParameters({{kTestFeature2, params2}}, {}); + + EXPECT_EQ(kValue2, GetFieldTrialParamValueByFeature(kTestFeature2, kParam)); + EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + } + EXPECT_EQ(kValue, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); +} + +TEST_F(ScopedFeatureListTest, ParamsWithEmptyValue) { + const char kParam[] = "p"; + const char kEmptyValue[] = ""; + FieldTrialParams params = {{kParam, kEmptyValue}}; + + test::ScopedFeatureList feature_list0; + feature_list0.InitWithFeaturesAndParameters({{kTestFeature1, params}}, {}); + EXPECT_EQ(kEmptyValue, + GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + { + const char kValue1[] = "normal"; + FieldTrialParams params1 = {{kParam, kValue1}}; + test::ScopedFeatureList feature_list1; + feature_list1.InitWithFeaturesAndParameters({{kTestFeature1, params1}}, {}); + + EXPECT_EQ(kValue1, GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); + } + EXPECT_EQ(kEmptyValue, + GetFieldTrialParamValueByFeature(kTestFeature1, kParam)); +} + +TEST_F(ScopedFeatureListTest, EnableFeatureOverrideDisable) { + test::ScopedFeatureList feature_list1; + feature_list1.InitWithFeatures({}, {kTestFeature1}); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({kTestFeature1}, {}); + ExpectFeatures("TestFeature1", std::string()); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideNotMakeDuplicate) { + test::ScopedFeatureList feature_list1; + feature_list1.InitWithFeatures({}, {kTestFeature1}); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({}, {kTestFeature1}); + ExpectFeatures(std::string(), "TestFeature1"); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithDefault) { + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("*TestFeature1", std::string()); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({kTestFeature1}, {}); + ExpectFeatures("TestFeature1", std::string()); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithDefault2) { + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("*TestFeature1", std::string()); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({}, {kTestFeature1}); + ExpectFeatures(std::string(), "TestFeature1"); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithEnabledFieldTrial) { + test::ScopedFeatureList feature_list1; + + std::unique_ptr<FeatureList> feature_list(new FeatureList); + FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample", "A"); + feature_list->RegisterFieldTrialOverride( + 
kTestFeature1.name, FeatureList::OVERRIDE_ENABLE_FEATURE, trial); + feature_list1.InitWithFeatureList(std::move(feature_list)); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({kTestFeature1}, {}); + ExpectFeatures("TestFeature1", std::string()); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideFeatureWithDisabledFieldTrial) { + test::ScopedFeatureList feature_list1; + + std::unique_ptr<FeatureList> feature_list(new FeatureList); + FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample", "A"); + feature_list->RegisterFieldTrialOverride( + kTestFeature1.name, FeatureList::OVERRIDE_DISABLE_FEATURE, trial); + feature_list1.InitWithFeatureList(std::move(feature_list)); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({kTestFeature1}, {}); + ExpectFeatures("TestFeature1", std::string()); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideKeepsOtherExistingFeature) { + test::ScopedFeatureList feature_list1; + feature_list1.InitWithFeatures({}, {kTestFeature1}); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({}, {kTestFeature2}); + EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature1)); + EXPECT_FALSE(FeatureList::IsEnabled(kTestFeature2)); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideKeepsOtherExistingFeature2) { + test::ScopedFeatureList feature_list1; + feature_list1.InitWithFeatures({}, {kTestFeature1}); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({kTestFeature2}, {}); + ExpectFeatures("TestFeature2", "TestFeature1"); + } +} + +TEST_F(ScopedFeatureListTest, FeatureOverrideKeepsOtherExistingDefaultFeature) { + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("*TestFeature1", std::string()); + + { + test::ScopedFeatureList feature_list2; + feature_list2.InitWithFeatures({}, {kTestFeature2}); + ExpectFeatures("*TestFeature1", "TestFeature2"); + } +} + +TEST_F(ScopedFeatureListTest, ScopedFeatureListIsNoopWhenNotInitialized) { + test::ScopedFeatureList feature_list1; + feature_list1.InitFromCommandLine("*TestFeature1", std::string()); + + // A ScopedFeatureList on which Init() is not called should not reset things + // when going out of scope. + { test::ScopedFeatureList feature_list2; } + + ExpectFeatures("*TestFeature1", std::string()); +} + +TEST(ScopedFeatureListTestWithMemberList, ScopedFeatureListLocalOverride) { + test::ScopedFeatureList initial_feature_list; + initial_feature_list.InitAndDisableFeature(kTestFeature1); + { + base::test::ScopedFeatureList scoped_features; + scoped_features.InitAndEnableFeatureWithParameters(kTestFeature1, + {{"mode", "nobugs"}}); + ASSERT_TRUE(FeatureList::IsEnabled(kTestFeature1)); + } +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/scoped_field_trial_list_resetter.cc b/chromium/base/test/scoped_field_trial_list_resetter.cc new file mode 100644 index 00000000000..0bc657b8229 --- /dev/null +++ b/chromium/base/test/scoped_field_trial_list_resetter.cc @@ -0,0 +1,21 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
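// Illustrative use of ScopedFeatureList following the recommendation in its
// header to initialize it in the test harness constructor (a minimal sketch,
// not part of this change; the feature, parameter, and fixture names are
// hypothetical):
//
//   #include "base/test/scoped_feature_list.h"
//
//   class MyFeatureTest : public testing::Test {
//    public:
//     MyFeatureTest() {
//       // Initialize before any code (or background thread) queries features.
//       scoped_feature_list_.InitAndEnableFeatureWithParameters(
//           kMyFeature, {{"mode", "fast"}});
//     }
//
//    private:
//     base::test::ScopedFeatureList scoped_feature_list_;
//   };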
+ +#include "base/test/scoped_field_trial_list_resetter.h" + +#include "base/metrics/field_trial.h" + +namespace base { +namespace test { + +ScopedFieldTrialListResetter::ScopedFieldTrialListResetter() + : original_field_trial_list_( + base::FieldTrialList::BackupInstanceForTesting()) {} + +ScopedFieldTrialListResetter::~ScopedFieldTrialListResetter() { + base::FieldTrialList::RestoreInstanceForTesting(original_field_trial_list_); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/scoped_field_trial_list_resetter.h b/chromium/base/test/scoped_field_trial_list_resetter.h new file mode 100644 index 00000000000..d7d2dcd35e9 --- /dev/null +++ b/chromium/base/test/scoped_field_trial_list_resetter.h @@ -0,0 +1,36 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_FIELD_TRIAL_LIST_RESETTER_H_ +#define BASE_TEST_SCOPED_FIELD_TRIAL_LIST_RESETTER_H_ + +namespace base { + +class FieldTrialList; + +namespace test { + +// DISCLAIMER: Please use ScopedFeatureList except for advanced cases where +// custom instantiation of FieldTrialList is required. +// +// ScopedFieldTrialListResetter resets the global FieldTrialList instance to +// null, and restores the original state when the class goes out of scope. This +// allows client code to initialize FieldTrialList instances in a custom +// fashion. +class ScopedFieldTrialListResetter final { + public: + ScopedFieldTrialListResetter(); + ScopedFieldTrialListResetter(const ScopedFieldTrialListResetter&) = delete; + ScopedFieldTrialListResetter(ScopedFieldTrialListResetter&&) = delete; + + ~ScopedFieldTrialListResetter(); + + private: + base::FieldTrialList* const original_field_trial_list_; +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_SCOPED_FIELD_TRIAL_LIST_RESETTER_H_ diff --git a/chromium/base/test/scoped_locale.cc b/chromium/base/test/scoped_locale.cc new file mode 100644 index 00000000000..c0182842b6d --- /dev/null +++ b/chromium/base/test/scoped_locale.cc @@ -0,0 +1,23 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_locale.h" + +#include <locale.h> + +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +ScopedLocale::ScopedLocale(const std::string& locale) { + prev_locale_ = setlocale(LC_ALL, nullptr); + EXPECT_TRUE(setlocale(LC_ALL, locale.c_str()) != nullptr) + << "Failed to set locale: " << locale; +} + +ScopedLocale::~ScopedLocale() { + EXPECT_STREQ(prev_locale_.c_str(), setlocale(LC_ALL, prev_locale_.c_str())); +} + +} // namespace base diff --git a/chromium/base/test/scoped_locale.h b/chromium/base/test/scoped_locale.h new file mode 100644 index 00000000000..ef64e98f8eb --- /dev/null +++ b/chromium/base/test/scoped_locale.h @@ -0,0 +1,29 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_LOCALE_H_ +#define BASE_TEST_SCOPED_LOCALE_H_ + +#include <string> + +#include "base/macros.h" + +namespace base { + +// Sets the given |locale| on construction, and restores the previous locale +// on destruction. 
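// Illustrative usage of ScopedLocale (a minimal sketch, not part of this
// change; the locale string is hypothetical and must be available on the
// test machine):
//
//   {
//     base::ScopedLocale scoped_locale("fr_FR.UTF-8");
//     // Number and date formatting here uses the French locale; the previous
//     // locale is restored when |scoped_locale| goes out of scope.
//   }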
+class ScopedLocale { + public: + explicit ScopedLocale(const std::string& locale); + ~ScopedLocale(); + + private: + std::string prev_locale_; + + DISALLOW_COPY_AND_ASSIGN(ScopedLocale); +}; + +} // namespace base + +#endif // BASE_TEST_SCOPED_LOCALE_H_ diff --git a/chromium/base/test/scoped_mock_clock_override.cc b/chromium/base/test/scoped_mock_clock_override.cc new file mode 100644 index 00000000000..46cc88437cd --- /dev/null +++ b/chromium/base/test/scoped_mock_clock_override.cc @@ -0,0 +1,46 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_mock_clock_override.h" + +namespace base { + +ScopedMockClockOverride* ScopedMockClockOverride::scoped_mock_clock_ = nullptr; + +ScopedMockClockOverride::ScopedMockClockOverride() + : // Start the offset past zero so that it's not treated as a null value. + offset_(TimeDelta::FromDays(365)) { + DCHECK(!scoped_mock_clock_) + << "Nested ScopedMockClockOverrides are not supported."; + + scoped_mock_clock_ = this; + + time_clock_overrides_ = std::make_unique<subtle::ScopedTimeClockOverrides>( + &ScopedMockClockOverride::Now, &ScopedMockClockOverride::NowTicks, + &ScopedMockClockOverride::NowThreadTicks); +} + +ScopedMockClockOverride::~ScopedMockClockOverride() { + scoped_mock_clock_ = nullptr; +} + +Time ScopedMockClockOverride::Now() { + return Time() + scoped_mock_clock_->offset_; +} + +TimeTicks ScopedMockClockOverride::NowTicks() { + return TimeTicks() + scoped_mock_clock_->offset_; +} + +ThreadTicks ScopedMockClockOverride::NowThreadTicks() { + return ThreadTicks() + scoped_mock_clock_->offset_; +} + +void ScopedMockClockOverride::Advance(TimeDelta delta) { + DCHECK_GT(delta, base::TimeDelta()) + << "Monotonically increasing time may not go backwards"; + offset_ += delta; +} + +} // namespace base diff --git a/chromium/base/test/scoped_mock_clock_override.h b/chromium/base/test/scoped_mock_clock_override.h new file mode 100644 index 00000000000..9f7a7e5d5a6 --- /dev/null +++ b/chromium/base/test/scoped_mock_clock_override.h @@ -0,0 +1,54 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_MOCK_CLOCK_OVERRIDE_H_ +#define BASE_TEST_SCOPED_MOCK_CLOCK_OVERRIDE_H_ + +#include <memory> + +#include "base/macros.h" +#include "base/time/time.h" +#include "base/time/time_override.h" + +namespace base { + +// Override the return value of Time::Now(), Time::NowFromSystemTime(), +// TimeTicks::Now(), and ThreadTicks::Now() through a simple advanceable clock. +// +// This utility is intended to support tests that: +// +// - Depend on large existing codebases that call TimeXYZ::Now() directly or +// - Have no ability to inject a TickClock into the code getting the time +// (e.g. integration tests in which a TickClock would be several layers +// removed from the test code) +// +// For new unit tests, developers are highly encouraged to structure new code +// around a dependency injected base::Clock, base::TickClock, etc. to be able +// to supply a mock time in tests without a global override. +// +// NOTE: ScopedMockClockOverride should be created while single-threaded and +// before the first call to Now() to avoid threading issues and inconsistencies +// in returned values. Nested overrides are not allowed. 
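// Illustrative usage of ScopedMockClockOverride (a minimal sketch, not part
// of this change; the test name and the |cache| object under test are
// hypothetical):
//
//   TEST(MyCacheTest, EntriesExpire) {
//     base::ScopedMockClockOverride mock_clock;
//     cache.Put("key", "value");
//     mock_clock.Advance(base::TimeDelta::FromMinutes(10));
//     EXPECT_FALSE(cache.Get("key"));  // Entry expired under the mock clock.
//   }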
+class ScopedMockClockOverride { + public: + ScopedMockClockOverride(); + ~ScopedMockClockOverride(); + + static Time Now(); + static TimeTicks NowTicks(); + static ThreadTicks NowThreadTicks(); + + void Advance(TimeDelta delta); + + private: + std::unique_ptr<base::subtle::ScopedTimeClockOverrides> time_clock_overrides_; + TimeDelta offset_; + static ScopedMockClockOverride* scoped_mock_clock_; + + DISALLOW_COPY_AND_ASSIGN(ScopedMockClockOverride); +}; + +} // namespace base + +#endif // BASE_TEST_SCOPED_MOCK_CLOCK_OVERRIDE_H_ diff --git a/chromium/base/test/scoped_mock_clock_override_unittest.cc b/chromium/base/test/scoped_mock_clock_override_unittest.cc new file mode 100644 index 00000000000..ab935e07db9 --- /dev/null +++ b/chromium/base/test/scoped_mock_clock_override_unittest.cc @@ -0,0 +1,104 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_mock_clock_override.h" + +#include "base/build_time.h" +#include "base/time/time.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +namespace { + +#if defined(OS_FUCHSIA) +// TODO(https://crbug.com/1060357): Enable when RTC flake is fixed. +#define MAYBE_Time DISABLED_Time +#else +#define MAYBE_Time Time +#endif + +TEST(ScopedMockClockOverrideTest, MAYBE_Time) { + // Choose a reference time that we know to be in the past but close to now. + Time build_time = GetBuildTime(); + + // Override is not active. All Now() methods should return a time greater than + // the build time. + EXPECT_LT(build_time, Time::Now()); + EXPECT_GT(Time::Max(), Time::Now()); + EXPECT_LT(build_time, Time::NowFromSystemTime()); + EXPECT_GT(Time::Max(), Time::NowFromSystemTime()); + + { + // Set override. + ScopedMockClockOverride mock_clock; + + EXPECT_NE(Time(), Time::Now()); + Time start = Time::Now(); + mock_clock.Advance(TimeDelta::FromSeconds(1)); + EXPECT_EQ(start + TimeDelta::FromSeconds(1), Time::Now()); + } + + // All methods return real time again. + EXPECT_LT(build_time, Time::Now()); + EXPECT_GT(Time::Max(), Time::Now()); + EXPECT_LT(build_time, Time::NowFromSystemTime()); + EXPECT_GT(Time::Max(), Time::NowFromSystemTime()); +} + +TEST(ScopedMockClockOverrideTest, TimeTicks) { + // Override is not active. All Now() methods should return a sensible value. + EXPECT_LT(TimeTicks::UnixEpoch(), TimeTicks::Now()); + EXPECT_GT(TimeTicks::Max(), TimeTicks::Now()); + EXPECT_LT(TimeTicks::UnixEpoch() + TimeDelta::FromDays(365), + TimeTicks::Now()); + + { + // Set override. + ScopedMockClockOverride mock_clock; + + EXPECT_NE(TimeTicks(), TimeTicks::Now()); + TimeTicks start = TimeTicks::Now(); + mock_clock.Advance(TimeDelta::FromSeconds(1)); + EXPECT_EQ(start + TimeDelta::FromSeconds(1), TimeTicks::Now()); + } + + // All methods return real ticks again. + EXPECT_LT(TimeTicks::UnixEpoch(), TimeTicks::Now()); + EXPECT_GT(TimeTicks::Max(), TimeTicks::Now()); + EXPECT_LT(TimeTicks::UnixEpoch() + TimeDelta::FromDays(365), + TimeTicks::Now()); +} + +TEST(ScopedMockClockOverrideTest, ThreadTicks) { + if (ThreadTicks::IsSupported()) { + ThreadTicks::WaitUntilInitialized(); + + // Override is not active. All Now() methods should return a sensible value. 
+ ThreadTicks initial_thread_ticks = ThreadTicks::Now(); + EXPECT_LE(initial_thread_ticks, ThreadTicks::Now()); + EXPECT_GT(ThreadTicks::Max(), ThreadTicks::Now()); + EXPECT_LT(ThreadTicks(), ThreadTicks::Now()); + + { + // Set override. + ScopedMockClockOverride mock_clock; + + EXPECT_NE(ThreadTicks(), ThreadTicks::Now()); + ThreadTicks start = ThreadTicks::Now(); + mock_clock.Advance(TimeDelta::FromSeconds(1)); + EXPECT_EQ(start + TimeDelta::FromSeconds(1), ThreadTicks::Now()); + } + + // All methods return real ticks again. + EXPECT_LE(initial_thread_ticks, ThreadTicks::Now()); + EXPECT_GT(ThreadTicks::Max(), ThreadTicks::Now()); + EXPECT_LT(ThreadTicks(), ThreadTicks::Now()); + } +} + +} // namespace + +} // namespace base diff --git a/chromium/base/test/scoped_mock_time_message_loop_task_runner.cc b/chromium/base/test/scoped_mock_time_message_loop_task_runner.cc new file mode 100644 index 00000000000..0ace2923566 --- /dev/null +++ b/chromium/base/test/scoped_mock_time_message_loop_task_runner.cc @@ -0,0 +1,38 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_mock_time_message_loop_task_runner.h" + +#include "base/bind.h" +#include "base/check_op.h" +#include "base/message_loop/message_loop_current.h" +#include "base/run_loop.h" +#include "base/test/test_pending_task.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/time/time.h" + +namespace base { + +ScopedMockTimeMessageLoopTaskRunner::ScopedMockTimeMessageLoopTaskRunner() + : task_runner_(new TestMockTimeTaskRunner), + previous_task_runner_(ThreadTaskRunnerHandle::Get()) { + DCHECK(MessageLoopCurrent::Get()); + // To ensure that we process any initialization tasks posted to the + // MessageLoop by a test fixture before replacing its TaskRunner. + RunLoop().RunUntilIdle(); + MessageLoopCurrent::Get()->SetTaskRunner(task_runner_); +} + +ScopedMockTimeMessageLoopTaskRunner::~ScopedMockTimeMessageLoopTaskRunner() { + DCHECK(previous_task_runner_->RunsTasksInCurrentSequence()); + DCHECK_EQ(task_runner_, ThreadTaskRunnerHandle::Get()); + for (auto& pending_task : task_runner_->TakePendingTasks()) { + previous_task_runner_->PostDelayedTask( + pending_task.location, std::move(pending_task.task), + pending_task.GetTimeToRun() - task_runner_->NowTicks()); + } + MessageLoopCurrent::Get()->SetTaskRunner(std::move(previous_task_runner_)); +} + +} // namespace base diff --git a/chromium/base/test/scoped_mock_time_message_loop_task_runner.h b/chromium/base/test/scoped_mock_time_message_loop_task_runner.h new file mode 100644 index 00000000000..b671304b295 --- /dev/null +++ b/chromium/base/test/scoped_mock_time_message_loop_task_runner.h @@ -0,0 +1,45 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SCOPED_MOCK_TIME_MESSAGE_LOOP_TASK_RUNNER_H_ +#define BASE_TEST_SCOPED_MOCK_TIME_MESSAGE_LOOP_TASK_RUNNER_H_ + +#include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/test/test_mock_time_task_runner.h" + +namespace base { + +class SingleThreadTaskRunner; + +// A scoped wrapper around TestMockTimeTaskRunner that replaces +// MessageLoopCurrent::Get()'s task runner (and consequently +// ThreadTaskRunnerHandle) with a TestMockTimeTaskRunner and resets it back at +// the end of its scope. 
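// Illustrative usage of ScopedMockTimeMessageLoopTaskRunner (a minimal
// sketch, not part of this change; the test fixture and the delayed work are
// hypothetical, and a message loop must already exist on the current thread):
//
//   TEST_F(MyMessageLoopTest, DelayedTaskRuns) {
//     base::ScopedMockTimeMessageLoopTaskRunner mock_main_runner;
//     PostMyDelayedWork();  // Posts to the (now mocked) main task runner.
//     mock_main_runner->FastForwardBy(base::TimeDelta::FromSeconds(5));
//     // Delayed tasks due within 5 seconds have now run.
//   }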
+// +// Note: RunLoop() will not work in the scope of a +// ScopedMockTimeMessageLoopTaskRunner, the underlying TestMockTimeTaskRunner's +// methods must be used instead to pump tasks. +// +// Note: Use TaskEnvironment + TimeSource::MOCK_TIME instead of this in unit +// tests. In browser tests you unfortunately still need this at the moment to +// mock delayed tasks on the main thread... +class ScopedMockTimeMessageLoopTaskRunner { + public: + ScopedMockTimeMessageLoopTaskRunner(); + ~ScopedMockTimeMessageLoopTaskRunner(); + + TestMockTimeTaskRunner* task_runner() { return task_runner_.get(); } + TestMockTimeTaskRunner* operator->() { return task_runner_.get(); } + + private: + const scoped_refptr<TestMockTimeTaskRunner> task_runner_; + scoped_refptr<SingleThreadTaskRunner> previous_task_runner_; + + DISALLOW_COPY_AND_ASSIGN(ScopedMockTimeMessageLoopTaskRunner); +}; + +} // namespace base + +#endif // BASE_TEST_SCOPED_MOCK_TIME_MESSAGE_LOOP_TASK_RUNNER_H_ diff --git a/chromium/base/test/scoped_mock_time_message_loop_task_runner_unittest.cc b/chromium/base/test/scoped_mock_time_message_loop_task_runner_unittest.cc new file mode 100644 index 00000000000..f2f34b802d6 --- /dev/null +++ b/chromium/base/test/scoped_mock_time_message_loop_task_runner_unittest.cc @@ -0,0 +1,125 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_mock_time_message_loop_task_runner.h" + +#include <memory> + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/callback_forward.h" +#include "base/containers/circular_deque.h" +#include "base/macros.h" +#include "base/memory/ptr_util.h" +#include "base/memory/ref_counted.h" +#include "base/message_loop/message_loop_current.h" +#include "base/test/task_environment.h" +#include "base/test/test_mock_time_task_runner.h" +#include "base/test/test_pending_task.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/time/time.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace { + +TaskRunner* GetCurrentTaskRunner() { + return ThreadTaskRunnerHandle::Get().get(); +} + +void AssignTrue(bool* out) { + *out = true; +} + +// Pops a task from the front of |pending_tasks| and returns it. +TestPendingTask PopFront(base::circular_deque<TestPendingTask>* pending_tasks) { + TestPendingTask task = std::move(pending_tasks->front()); + pending_tasks->pop_front(); + return task; +} + +class ScopedMockTimeMessageLoopTaskRunnerTest : public testing::Test { + public: + ScopedMockTimeMessageLoopTaskRunnerTest() + : original_task_runner_(new TestMockTimeTaskRunner()) { + MessageLoopCurrent::Get()->SetTaskRunner(original_task_runner_); + } + + protected: + TestMockTimeTaskRunner* original_task_runner() { + return original_task_runner_.get(); + } + + private: + scoped_refptr<TestMockTimeTaskRunner> original_task_runner_; + + test::SingleThreadTaskEnvironment task_environment_; + + DISALLOW_COPY_AND_ASSIGN(ScopedMockTimeMessageLoopTaskRunnerTest); +}; + +// Verifies a new TaskRunner is installed while a +// ScopedMockTimeMessageLoopTaskRunner exists and the previous one is installed +// after destruction. 
+TEST_F(ScopedMockTimeMessageLoopTaskRunnerTest, CurrentTaskRunners) { + auto scoped_task_runner_ = + std::make_unique<ScopedMockTimeMessageLoopTaskRunner>(); + EXPECT_EQ(scoped_task_runner_->task_runner(), GetCurrentTaskRunner()); + scoped_task_runner_.reset(); + EXPECT_EQ(original_task_runner(), GetCurrentTaskRunner()); +} + +TEST_F(ScopedMockTimeMessageLoopTaskRunnerTest, + IncompleteTasksAreCopiedToPreviousTaskRunnerAfterDestruction) { + auto scoped_task_runner_ = + std::make_unique<ScopedMockTimeMessageLoopTaskRunner>(); + + bool task_10_has_run = false; + bool task_11_has_run = false; + + OnceClosure task_1 = DoNothing(); + OnceClosure task_2 = DoNothing(); + OnceClosure task_10 = BindOnce(&AssignTrue, &task_10_has_run); + OnceClosure task_11 = BindOnce(&AssignTrue, &task_11_has_run); + + constexpr TimeDelta task_1_delay = TimeDelta::FromSeconds(1); + constexpr TimeDelta task_2_delay = TimeDelta::FromSeconds(2); + constexpr TimeDelta task_10_delay = TimeDelta::FromSeconds(10); + constexpr TimeDelta task_11_delay = TimeDelta::FromSeconds(11); + + constexpr TimeDelta step_time_by = TimeDelta::FromSeconds(5); + + GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, std::move(task_1), + task_1_delay); + GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, std::move(task_2), + task_2_delay); + GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, std::move(task_10), + task_10_delay); + GetCurrentTaskRunner()->PostDelayedTask(FROM_HERE, std::move(task_11), + task_11_delay); + + scoped_task_runner_->task_runner()->FastForwardBy(step_time_by); + + scoped_task_runner_.reset(); + + base::circular_deque<TestPendingTask> pending_tasks = + original_task_runner()->TakePendingTasks(); + + EXPECT_EQ(2U, pending_tasks.size()); + + TestPendingTask pending_task = PopFront(&pending_tasks); + EXPECT_FALSE(task_10_has_run); + std::move(pending_task.task).Run(); + EXPECT_TRUE(task_10_has_run); + EXPECT_EQ(task_10_delay - step_time_by, pending_task.delay); + + pending_task = PopFront(&pending_tasks); + EXPECT_FALSE(task_11_has_run); + std::move(pending_task.task).Run(); + EXPECT_TRUE(task_11_has_run); + EXPECT_EQ(task_11_delay - step_time_by, pending_task.delay); +} + +} // namespace +} // namespace base diff --git a/chromium/base/test/scoped_os_info_override_win.cc b/chromium/base/test/scoped_os_info_override_win.cc new file mode 100644 index 00000000000..7415c13234c --- /dev/null +++ b/chromium/base/test/scoped_os_info_override_win.cc @@ -0,0 +1,126 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
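[Usage sketch, not part of the patch] The scoped_mock_time_message_loop_task_runner.h comments above recommend TaskEnvironment with TimeSource::MOCK_TIME for unit tests; a minimal sketch of that preferred pattern follows. The test name and the posted task are illustrative assumptions only.

#include "base/bind.h"
#include "base/location.h"
#include "base/test/task_environment.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical unit test demonstrating mock time via TaskEnvironment.
TEST(MockTimeExampleTest, DelayedTaskRunsUnderMockTime) {
  base::test::SingleThreadTaskEnvironment task_environment(
      base::test::TaskEnvironment::TimeSource::MOCK_TIME);

  bool fired = false;
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::BindOnce([](bool* out) { *out = true; }, &fired),
      base::TimeDelta::FromHours(1));

  // Advances mock time and runs the ripe delayed task without real waiting.
  task_environment.FastForwardBy(base::TimeDelta::FromHours(1));
  EXPECT_TRUE(fired);
}

Per the header comment, ScopedMockTimeMessageLoopTaskRunner itself remains the fallback for browser tests that cannot use TaskEnvironment on the main thread.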
+ +#include "base/test/scoped_os_info_override_win.h" + +#include <windows.h> + +#include "base/win/windows_version.h" + +namespace base { +namespace test { + +ScopedOSInfoOverride::ScopedOSInfoOverride(Type type) + : original_info_(base::win::OSInfo::GetInstance()), + overriding_info_(CreateInfoOfType(type)) { + *base::win::OSInfo::GetInstanceStorage() = overriding_info_.get(); +} + +ScopedOSInfoOverride::~ScopedOSInfoOverride() { + *base::win::OSInfo::GetInstanceStorage() = original_info_; +} + +// static +ScopedOSInfoOverride::UniqueOsInfo ScopedOSInfoOverride::CreateInfoOfType( + Type type) { + _OSVERSIONINFOEXW version_info = {sizeof(version_info)}; + _SYSTEM_INFO system_info = {}; + int os_type = 0; + + switch (type) { + case Type::kWin10Pro: + case Type::kWin10Home: + version_info.dwMajorVersion = 10; + version_info.dwMinorVersion = 0; + version_info.dwBuildNumber = 15063; + version_info.wServicePackMajor = 0; + version_info.wServicePackMinor = 0; + version_info.szCSDVersion[0] = 0; + version_info.wProductType = VER_NT_WORKSTATION; + version_info.wSuiteMask = VER_SUITE_PERSONAL; + + system_info.wProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64; + system_info.dwNumberOfProcessors = 1; + system_info.dwAllocationGranularity = 8; + + os_type = + type == Type::kWin10Home ? PRODUCT_HOME_BASIC : PRODUCT_PROFESSIONAL; + break; + case Type::kWinServer2016: + version_info.dwMajorVersion = 10; + version_info.dwMinorVersion = 0; + version_info.dwBuildNumber = 17134; + version_info.wServicePackMajor = 0; + version_info.wServicePackMinor = 0; + version_info.szCSDVersion[0] = 0; + version_info.wProductType = VER_NT_SERVER; + version_info.wSuiteMask = VER_SUITE_ENTERPRISE; + + system_info.wProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64; + system_info.dwNumberOfProcessors = 4; + system_info.dwAllocationGranularity = 64 * 1024; + + os_type = PRODUCT_STANDARD_SERVER; + break; + case Type::kWin81Pro: + version_info.dwMajorVersion = 6; + version_info.dwMinorVersion = 3; + version_info.dwBuildNumber = 9600; + version_info.wServicePackMajor = 0; + version_info.wServicePackMinor = 0; + version_info.szCSDVersion[0] = 0; + version_info.wProductType = VER_NT_WORKSTATION; + version_info.wSuiteMask = VER_SUITE_PERSONAL; + + system_info.wProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64; + system_info.dwNumberOfProcessors = 1; + system_info.dwAllocationGranularity = 64 * 1024; + + os_type = PRODUCT_PROFESSIONAL; + break; + case Type::kWinServer2012R2: + version_info.dwMajorVersion = 6; + version_info.dwMinorVersion = 3; + version_info.dwBuildNumber = 9600; + version_info.wServicePackMajor = 0; + version_info.wServicePackMinor = 0; + version_info.szCSDVersion[0] = 0; + version_info.wProductType = VER_NT_SERVER; + version_info.wSuiteMask = VER_SUITE_ENTERPRISE; + + system_info.wProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64; + system_info.dwNumberOfProcessors = 2; + system_info.dwAllocationGranularity = 64 * 1024; + + os_type = PRODUCT_STANDARD_SERVER; + break; + case Type::kWin7ProSP1: + version_info.dwMajorVersion = 6; + version_info.dwMinorVersion = 1; + version_info.dwBuildNumber = 7601; + version_info.wServicePackMajor = 1; + version_info.wServicePackMinor = 0; + wcscpy_s(version_info.szCSDVersion, L"Service Pack 1"); + version_info.wProductType = VER_NT_WORKSTATION; + version_info.wSuiteMask = VER_SUITE_PERSONAL; + + system_info.wProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64; + system_info.dwNumberOfProcessors = 1; + system_info.dwAllocationGranularity = 64 * 1024; + + os_type = 
          PRODUCT_PROFESSIONAL;
+      break;
+  }
+
+  return UniqueOsInfo(new base::win::OSInfo(version_info, system_info, os_type),
+                      &ScopedOSInfoOverride::deleter);
+}
+
+// static
+void ScopedOSInfoOverride::deleter(base::win::OSInfo* info) {
+  delete info;
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/chromium/base/test/scoped_os_info_override_win.h b/chromium/base/test/scoped_os_info_override_win.h
new file mode 100644
index 00000000000..07ae7a964f1
--- /dev/null
+++ b/chromium/base/test/scoped_os_info_override_win.h
@@ -0,0 +1,64 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_OS_INFO_OVERRIDE_WIN_H_
+#define BASE_TEST_SCOPED_OS_INFO_OVERRIDE_WIN_H_
+
+#include <memory>
+
+#include "base/macros.h"
+
+namespace base {
+namespace win {
+class OSInfo;
+}  // namespace win
+}  // namespace base
+
+namespace base {
+namespace test {
+
+// Helper class to override info returned by base::win::OSInfo::GetInstance()
+// for the lifetime of this object. Upon destruction, the original info at time
+// of object creation is restored.
+class ScopedOSInfoOverride {
+ public:
+  // Types of Windows machines that can be used for overriding. Add new
+  // machine types as needed.
+  enum class Type {
+    kWin10Pro,
+    kWin10Home,
+    kWinServer2016,
+    kWin81Pro,
+    kWinServer2012R2,
+    kWin7ProSP1,
+  };
+
+  explicit ScopedOSInfoOverride(Type type);
+  ~ScopedOSInfoOverride();
+
+ private:
+  using UniqueOsInfo =
+      std::unique_ptr<base::win::OSInfo, void (*)(base::win::OSInfo*)>;
+
+  static UniqueOsInfo CreateInfoOfType(Type type);
+
+  // The OSInfo taken by this instance at construction and restored at
+  // destruction.
+  base::win::OSInfo* original_info_;
+
+  // The OSInfo owned by this scoped object and which overrides
+  // base::win::OSInfo::GetInstance() for the lifespan of the object.
+  UniqueOsInfo overriding_info_;
+
+  // Because the dtor of OSInfo is private, a custom deleter is needed to use
+  // unique_ptr.
+  static void deleter(base::win::OSInfo* info);
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedOSInfoOverride);
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_OS_INFO_OVERRIDE_WIN_H_
diff --git a/chromium/base/test/scoped_path_override.cc b/chromium/base/test/scoped_path_override.cc
new file mode 100644
index 00000000000..dc4a34089ce
--- /dev/null
+++ b/chromium/base/test/scoped_path_override.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
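[Usage sketch, not part of the patch] A brief illustration of the ScopedOSInfoOverride helper defined above; the test name is hypothetical, and the expected version numbers simply mirror the kWin7ProSP1 values set in scoped_os_info_override_win.cc. Windows-only, like the helper itself.

#include "base/test/scoped_os_info_override_win.h"
#include "base/win/windows_version.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical test showing that the override is visible through OSInfo.
TEST(OsInfoOverrideExampleTest, SeesFakeWin7Info) {
  base::test::ScopedOSInfoOverride os_override(
      base::test::ScopedOSInfoOverride::Type::kWin7ProSP1);

  // Anything consulting base::win::OSInfo::GetInstance() inside this scope
  // sees the fake 6.1 (Windows 7 SP1) version information.
  auto* info = base::win::OSInfo::GetInstance();
  EXPECT_EQ(6u, info->version_number().major);
  EXPECT_EQ(1u, info->version_number().minor);
}  // The original OSInfo is restored when |os_override| goes out of scope.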
+
+#include "base/test/scoped_path_override.h"
+
+#include "base/check.h"
+#include "base/path_service.h"
+
+namespace base {
+
+ScopedPathOverride::ScopedPathOverride(int key) : key_(key) {
+  bool result = temp_dir_.CreateUniqueTempDir();
+  CHECK(result);
+  result = PathService::Override(key, temp_dir_.GetPath());
+  CHECK(result);
+}
+
+ScopedPathOverride::ScopedPathOverride(int key, const base::FilePath& dir)
+    : key_(key) {
+  bool result = PathService::Override(key, dir);
+  CHECK(result);
+}
+
+ScopedPathOverride::ScopedPathOverride(int key,
+                                       const FilePath& path,
+                                       bool is_absolute,
+                                       bool create)
+    : key_(key) {
+  bool result =
+      PathService::OverrideAndCreateIfNeeded(key, path, is_absolute, create);
+  CHECK(result);
+}
+
+ScopedPathOverride::~ScopedPathOverride() {
+  bool result = PathService::RemoveOverride(key_);
+  CHECK(result) << "The override seems to have been removed already!";
+}
+
+}  // namespace base
diff --git a/chromium/base/test/scoped_path_override.h b/chromium/base/test/scoped_path_override.h
new file mode 100644
index 00000000000..f5891490b1c
--- /dev/null
+++ b/chromium/base/test/scoped_path_override.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_SCOPED_PATH_OVERRIDE_H_
+#define BASE_TEST_SCOPED_PATH_OVERRIDE_H_
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/macros.h"
+
+namespace base {
+
+class FilePath;
+
+// Sets a path override on construction, and removes it when the object goes
+// out of scope. This class is intended to be used by tests that need to
+// override paths to ensure their overrides are properly handled and reverted
+// when the scope of the test is left.
+class ScopedPathOverride {
+ public:
+  // Constructor that initializes the override to a scoped temp directory.
+  explicit ScopedPathOverride(int key);
+
+  // Constructor that uses a path provided by the user.
+  ScopedPathOverride(int key, const FilePath& dir);
+
+  // See PathService::OverrideAndCreateIfNeeded.
+  ScopedPathOverride(int key,
+                     const FilePath& path,
+                     bool is_absolute,
+                     bool create);
+  ~ScopedPathOverride();
+
+ private:
+  int key_;
+  ScopedTempDir temp_dir_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedPathOverride);
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SCOPED_PATH_OVERRIDE_H_
diff --git a/chromium/base/test/scoped_run_loop_timeout.cc b/chromium/base/test/scoped_run_loop_timeout.cc
new file mode 100644
index 00000000000..5158c5c4c84
--- /dev/null
+++ b/chromium/base/test/scoped_run_loop_timeout.cc
@@ -0,0 +1,94 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
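[Usage sketch, not part of the patch] A minimal illustration of how the ScopedPathOverride added above is typically used; the test name and the choice of base::DIR_TEMP as the overridden key are assumptions for the example.

#include "base/base_paths.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/path_service.h"
#include "base/test/scoped_path_override.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical test: redirect DIR_TEMP to a fresh scoped temp directory.
TEST(PathOverrideExampleTest, TempDirIsRedirected) {
  base::ScopedPathOverride temp_override(base::DIR_TEMP);

  base::FilePath temp_dir;
  ASSERT_TRUE(base::PathService::Get(base::DIR_TEMP, &temp_dir));
  // Code under test that resolves DIR_TEMP now lands in the scoped directory.
  EXPECT_TRUE(base::DirectoryExists(temp_dir));
}  // The override (and its temp directory) is removed when the test ends.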
+ +#include "base/test/scoped_run_loop_timeout.h" + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/location.h" +#include "base/strings/strcat.h" +#include "base/time/time.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace test { + +namespace { + +bool g_add_gtest_failure_on_timeout = false; + +std::string TimeoutMessage(const RepeatingCallback<std::string()>& get_log) { + std::string message = "RunLoop::Run() timed out."; + if (get_log) + StrAppend(&message, {"\n", get_log.Run()}); + return message; +} + +} // namespace + +ScopedRunLoopTimeout::ScopedRunLoopTimeout(const Location& from_here, + TimeDelta timeout) + : ScopedRunLoopTimeout(from_here, timeout, NullCallback()) {} + +ScopedRunLoopTimeout::~ScopedRunLoopTimeout() { + RunLoop::SetTimeoutForCurrentThread(nested_timeout_); +} + +ScopedRunLoopTimeout::ScopedRunLoopTimeout( + const Location& from_here, + TimeDelta timeout, + RepeatingCallback<std::string()> on_timeout_log) + : nested_timeout_(RunLoop::GetTimeoutForCurrentThread()) { + DCHECK_GT(timeout, TimeDelta()); + run_timeout_.timeout = timeout; + + if (g_add_gtest_failure_on_timeout) { + run_timeout_.on_timeout = BindRepeating( + [](const Location& from_here, + RepeatingCallback<std::string()> on_timeout_log) { + GTEST_FAIL_AT(from_here.file_name(), from_here.line_number()) + << TimeoutMessage(on_timeout_log); + }, + from_here, std::move(on_timeout_log)); + } else { + run_timeout_.on_timeout = BindRepeating( + [](const Location& from_here, + RepeatingCallback<std::string()> on_timeout_log) { + std::string message = TimeoutMessage(on_timeout_log); + logging::LogMessage(from_here.file_name(), from_here.line_number(), + message.data()); + }, + from_here, std::move(on_timeout_log)); + } + + RunLoop::SetTimeoutForCurrentThread(&run_timeout_); +} + +// static +bool ScopedRunLoopTimeout::ExistsForCurrentThread() { + return RunLoop::GetTimeoutForCurrentThread() != nullptr; +} + +// static +void ScopedRunLoopTimeout::SetAddGTestFailureOnTimeout() { + g_add_gtest_failure_on_timeout = true; +} + +// static +const RunLoop::RunLoopTimeout* +ScopedRunLoopTimeout::GetTimeoutForCurrentThread() { + return RunLoop::GetTimeoutForCurrentThread(); +} + +ScopedDisableRunLoopTimeout::ScopedDisableRunLoopTimeout() + : nested_timeout_(RunLoop::GetTimeoutForCurrentThread()) { + RunLoop::SetTimeoutForCurrentThread(nullptr); +} + +ScopedDisableRunLoopTimeout::~ScopedDisableRunLoopTimeout() { + RunLoop::SetTimeoutForCurrentThread(nested_timeout_); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/scoped_run_loop_timeout.h b/chromium/base/test/scoped_run_loop_timeout.h new file mode 100644 index 00000000000..12abd9f4e55 --- /dev/null +++ b/chromium/base/test/scoped_run_loop_timeout.h @@ -0,0 +1,107 @@ +// Copyright 2019 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef BASE_TEST_SCOPED_RUN_LOOP_TIMEOUT_H_ +#define BASE_TEST_SCOPED_RUN_LOOP_TIMEOUT_H_ + +#include <string> + +#include "base/callback.h" +#include "base/location.h" +#include "base/run_loop.h" +#include "base/time/time.h" + +namespace content { +FORWARD_DECLARE_TEST(ContentBrowserTest, RunTimeoutInstalled); +} + +namespace base { +namespace test { + +FORWARD_DECLARE_TEST(TaskEnvironmentTest, SetsDefaultRunTimeout); + +// Configures all RunLoop::Run() calls on the current thread to run the +// supplied |on_timeout| callback if they run for longer than |timeout|. +// +// Specifying Run() timeouts per-thread avoids the need to cope with Run()s +// executing concurrently with ScopedRunLoopTimeout initialization or +// teardown, and allows "default" timeouts to be specified by suites, rather +// than explicitly configuring them for every RunLoop, in each test. +// +// This is used by test classes including TaskEnvironment and TestSuite to +// set a default Run() timeout on the main thread of all tests which use them. +// +// Tests which have steps which need to Run() for longer than their suite's +// default (if any) allows can override the active timeout by creating a nested +// ScopedRunLoopTimeout on their stack, e.g: +// +// ScopedRunLoopTimeout default_timeout(kDefaultRunTimeout); +// ... do other test stuff ... +// RunLoop().Run(); // Run for up to kDefaultRunTimeout. +// ... +// { +// ScopedRunLoopTimeout specific_timeout(kTestSpecificTimeout); +// RunLoop().Run(); // Run for up to kTestSpecificTimeout. +// } +// ... +// RunLoop().Run(); // Run for up to kDefaultRunTimeout. +// +// The currently-active timeout can also be temporarily disabled: +// ScopedDisableRunLoopTimeout disable_timeout; +// +// By default LOG(FATAL) will be invoked on Run() timeout. Test binaries +// can opt-in to using ADD_FAILURE() instead by calling +// SetAddGTestFailureOnTimeout() during process initialization. +// +// TaskEnvironment applies a default Run() timeout. + +class ScopedRunLoopTimeout { + public: + ScopedRunLoopTimeout(const Location& from_here, TimeDelta timeout); + ~ScopedRunLoopTimeout(); + + // Invokes |on_timeout_log| if |timeout| expires, and appends it to the + // logged error message. + ScopedRunLoopTimeout(const Location& from_here, + TimeDelta timeout, + RepeatingCallback<std::string()> on_timeout_log); + + ScopedRunLoopTimeout(const ScopedRunLoopTimeout&) = delete; + ScopedRunLoopTimeout& operator=(const ScopedRunLoopTimeout&) = delete; + + // Returns true if there is a Run() timeout configured on the current thread. + static bool ExistsForCurrentThread(); + + static void SetAddGTestFailureOnTimeout(); + + protected: + FRIEND_TEST_ALL_PREFIXES(ScopedRunLoopRunTimeoutTest, TimesOut); + FRIEND_TEST_ALL_PREFIXES(ScopedRunLoopRunTimeoutTest, RunTasksUntilTimeout); + FRIEND_TEST_ALL_PREFIXES(TaskEnvironmentTest, SetsDefaultRunTimeout); + FRIEND_TEST_ALL_PREFIXES(content::ContentBrowserTest, RunTimeoutInstalled); + + // Exposes the RunLoopTimeout to the friend tests (see above). 
+ static const RunLoop::RunLoopTimeout* GetTimeoutForCurrentThread(); + + const RunLoop::RunLoopTimeout* const nested_timeout_; + RunLoop::RunLoopTimeout run_timeout_; +}; + +class ScopedDisableRunLoopTimeout { + public: + ScopedDisableRunLoopTimeout(); + ~ScopedDisableRunLoopTimeout(); + + ScopedDisableRunLoopTimeout(const ScopedDisableRunLoopTimeout&) = delete; + ScopedDisableRunLoopTimeout& operator=(const ScopedDisableRunLoopTimeout&) = + delete; + + private: + const RunLoop::RunLoopTimeout* const nested_timeout_; +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_SCOPED_RUN_LOOP_TIMEOUT_H_ diff --git a/chromium/base/test/scoped_run_loop_timeout_unittest.cc b/chromium/base/test/scoped_run_loop_timeout_unittest.cc new file mode 100644 index 00000000000..c60bf71569e --- /dev/null +++ b/chromium/base/test/scoped_run_loop_timeout_unittest.cc @@ -0,0 +1,79 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/scoped_run_loop_timeout.h" + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/location.h" +#include "base/test/bind_test_util.h" +#include "base/test/gtest_util.h" +#include "base/test/task_environment.h" +#include "base/threading/sequenced_task_runner_handle.h" +#include "base/time/time.h" +#include "testing/gtest/include/gtest/gtest-spi.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace test { + +TEST(ScopedRunLoopTimeoutTest, TimesOut) { + TaskEnvironment task_environment; + RunLoop run_loop; + + static constexpr auto kArbitraryTimeout = TimeDelta::FromMilliseconds(10); + ScopedRunLoopTimeout run_timeout(FROM_HERE, kArbitraryTimeout); + + // Since the delayed task will be posted only after the message pump starts + // running, the ScopedRunLoopTimeout will already have started to elapse, + // so if Run() exits at the correct time then our delayed task will not run. + SequencedTaskRunnerHandle::Get()->PostTask( + FROM_HERE, + BindOnce(IgnoreResult(&SequencedTaskRunner::PostDelayedTask), + SequencedTaskRunnerHandle::Get(), FROM_HERE, + MakeExpectedNotRunClosure(FROM_HERE), kArbitraryTimeout)); + + // This task should get to run before Run() times-out. + SequencedTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, MakeExpectedRunClosure(FROM_HERE), kArbitraryTimeout); + + // EXPECT_FATAL_FAILURE() can only reference globals and statics. + static RunLoop& static_loop = run_loop; + EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out."); +} + +TEST(ScopedRunLoopTimeoutTest, RunTasksUntilTimeout) { + TaskEnvironment task_environment; + RunLoop run_loop; + + static constexpr auto kArbitraryTimeout = TimeDelta::FromMilliseconds(10); + ScopedRunLoopTimeout run_timeout(FROM_HERE, kArbitraryTimeout); + + // Posting a task with the same delay as our timeout, immediately before + // calling Run(), means it should get to run. Since this uses QuitWhenIdle(), + // the Run() timeout callback should also get to run. + SequencedTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, MakeExpectedRunClosure(FROM_HERE), kArbitraryTimeout); + + // EXPECT_FATAL_FAILURE() can only reference globals and statics. 
+ static RunLoop& static_loop = run_loop; + EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out."); +} + +TEST(ScopedRunLoopTimeoutTest, OnTimeoutLog) { + TaskEnvironment task_environment; + RunLoop run_loop; + + static constexpr auto kArbitraryTimeout = TimeDelta::FromMilliseconds(10); + ScopedRunLoopTimeout run_timeout( + FROM_HERE, kArbitraryTimeout, + BindRepeating([]() -> std::string { return "I like kittens!"; })); + + // EXPECT_FATAL_FAILURE() can only reference globals and statics. + static RunLoop& static_loop = run_loop; + EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.\nI like kittens!"); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/sequenced_task_runner_test_template.cc b/chromium/base/test/sequenced_task_runner_test_template.cc new file mode 100644 index 00000000000..bccc301ab7d --- /dev/null +++ b/chromium/base/test/sequenced_task_runner_test_template.cc @@ -0,0 +1,270 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/sequenced_task_runner_test_template.h" + +#include <ostream> + +#include "base/location.h" + +namespace base { + +namespace internal { + +TaskEvent::TaskEvent(int i, Type type) + : i(i), type(type) { +} + +SequencedTaskTracker::SequencedTaskTracker() + : next_post_i_(0), + task_end_count_(0), + task_end_cv_(&lock_) { +} + +void SequencedTaskTracker::PostWrappedNonNestableTask( + SequencedTaskRunner* task_runner, + OnceClosure task) { + AutoLock event_lock(lock_); + const int post_i = next_post_i_++; + auto wrapped_task = + BindOnce(&SequencedTaskTracker::RunTask, this, std::move(task), post_i); + task_runner->PostNonNestableTask(FROM_HERE, std::move(wrapped_task)); + TaskPosted(post_i); +} + +void SequencedTaskTracker::PostWrappedNestableTask( + SequencedTaskRunner* task_runner, + OnceClosure task) { + AutoLock event_lock(lock_); + const int post_i = next_post_i_++; + auto wrapped_task = + BindOnce(&SequencedTaskTracker::RunTask, this, std::move(task), post_i); + task_runner->PostTask(FROM_HERE, std::move(wrapped_task)); + TaskPosted(post_i); +} + +void SequencedTaskTracker::PostWrappedDelayedNonNestableTask( + SequencedTaskRunner* task_runner, + OnceClosure task, + TimeDelta delay) { + AutoLock event_lock(lock_); + const int post_i = next_post_i_++; + auto wrapped_task = + BindOnce(&SequencedTaskTracker::RunTask, this, std::move(task), post_i); + task_runner->PostNonNestableDelayedTask(FROM_HERE, std::move(wrapped_task), + delay); + TaskPosted(post_i); +} + +void SequencedTaskTracker::PostNonNestableTasks( + SequencedTaskRunner* task_runner, + int task_count) { + for (int i = 0; i < task_count; ++i) { + PostWrappedNonNestableTask(task_runner, OnceClosure()); + } +} + +void SequencedTaskTracker::RunTask(OnceClosure task, int task_i) { + TaskStarted(task_i); + if (!task.is_null()) + std::move(task).Run(); + TaskEnded(task_i); +} + +void SequencedTaskTracker::TaskPosted(int i) { + // Caller must own |lock_|. 
+ events_.push_back(TaskEvent(i, TaskEvent::POST)); +} + +void SequencedTaskTracker::TaskStarted(int i) { + AutoLock lock(lock_); + events_.push_back(TaskEvent(i, TaskEvent::START)); +} + +void SequencedTaskTracker::TaskEnded(int i) { + AutoLock lock(lock_); + events_.push_back(TaskEvent(i, TaskEvent::END)); + ++task_end_count_; + task_end_cv_.Signal(); +} + +const std::vector<TaskEvent>& +SequencedTaskTracker::GetTaskEvents() const { + return events_; +} + +void SequencedTaskTracker::WaitForCompletedTasks(int count) { + AutoLock lock(lock_); + while (task_end_count_ < count) + task_end_cv_.Wait(); +} + +SequencedTaskTracker::~SequencedTaskTracker() = default; + +void PrintTo(const TaskEvent& event, std::ostream* os) { + *os << "(i=" << event.i << ", type="; + switch (event.type) { + case TaskEvent::POST: *os << "POST"; break; + case TaskEvent::START: *os << "START"; break; + case TaskEvent::END: *os << "END"; break; + } + *os << ")"; +} + +namespace { + +// Returns the task ordinals for the task event type |type| in the order that +// they were recorded. +std::vector<int> GetEventTypeOrder(const std::vector<TaskEvent>& events, + TaskEvent::Type type) { + std::vector<int> tasks; + std::vector<TaskEvent>::const_iterator event; + for (event = events.begin(); event != events.end(); ++event) { + if (event->type == type) + tasks.push_back(event->i); + } + return tasks; +} + +// Returns all task events for task |task_i|. +std::vector<TaskEvent::Type> GetEventsForTask( + const std::vector<TaskEvent>& events, + int task_i) { + std::vector<TaskEvent::Type> task_event_orders; + std::vector<TaskEvent>::const_iterator event; + for (event = events.begin(); event != events.end(); ++event) { + if (event->i == task_i) + task_event_orders.push_back(event->type); + } + return task_event_orders; +} + +// Checks that the task events for each task in |events| occur in the order +// {POST, START, END}, and that there is only one instance of each event type +// per task. +::testing::AssertionResult CheckEventOrdersForEachTask( + const std::vector<TaskEvent>& events, + int task_count) { + std::vector<TaskEvent::Type> expected_order; + expected_order.push_back(TaskEvent::POST); + expected_order.push_back(TaskEvent::START); + expected_order.push_back(TaskEvent::END); + + // This is O(n^2), but it runs fast enough currently so is not worth + // optimizing. + for (int i = 0; i < task_count; ++i) { + const std::vector<TaskEvent::Type> task_events = + GetEventsForTask(events, i); + if (task_events != expected_order) { + return ::testing::AssertionFailure() + << "Events for task " << i << " are out of order; expected: " + << ::testing::PrintToString(expected_order) << "; actual: " + << ::testing::PrintToString(task_events); + } + } + return ::testing::AssertionSuccess(); +} + +// Checks that no two tasks were running at the same time. I.e. the only +// events allowed between the START and END of a task are the POSTs of other +// tasks. +::testing::AssertionResult CheckNoTaskRunsOverlap( + const std::vector<TaskEvent>& events) { + // If > -1, we're currently inside a START, END pair. + int current_task_i = -1; + + std::vector<TaskEvent>::const_iterator event; + for (event = events.begin(); event != events.end(); ++event) { + bool spurious_event_found = false; + + if (current_task_i == -1) { // Not inside a START, END pair. 
+ switch (event->type) { + case TaskEvent::POST: + break; + case TaskEvent::START: + current_task_i = event->i; + break; + case TaskEvent::END: + spurious_event_found = true; + break; + } + + } else { // Inside a START, END pair. + bool interleaved_task_detected = false; + + switch (event->type) { + case TaskEvent::POST: + if (event->i == current_task_i) + spurious_event_found = true; + break; + case TaskEvent::START: + interleaved_task_detected = true; + break; + case TaskEvent::END: + if (event->i != current_task_i) + interleaved_task_detected = true; + else + current_task_i = -1; + break; + } + + if (interleaved_task_detected) { + return ::testing::AssertionFailure() + << "Found event " << ::testing::PrintToString(*event) + << " between START and END events for task " << current_task_i + << "; event dump: " << ::testing::PrintToString(events); + } + } + + if (spurious_event_found) { + const int event_i = event - events.begin(); + return ::testing::AssertionFailure() + << "Spurious event " << ::testing::PrintToString(*event) + << " at position " << event_i << "; event dump: " + << ::testing::PrintToString(events); + } + } + + return ::testing::AssertionSuccess(); +} + +} // namespace + +::testing::AssertionResult CheckNonNestableInvariants( + const std::vector<TaskEvent>& events, + int task_count) { + const std::vector<int> post_order = + GetEventTypeOrder(events, TaskEvent::POST); + const std::vector<int> start_order = + GetEventTypeOrder(events, TaskEvent::START); + const std::vector<int> end_order = + GetEventTypeOrder(events, TaskEvent::END); + + if (start_order != post_order) { + return ::testing::AssertionFailure() + << "Expected START order (which equals actual POST order): \n" + << ::testing::PrintToString(post_order) + << "\n Actual START order:\n" + << ::testing::PrintToString(start_order); + } + + if (end_order != post_order) { + return ::testing::AssertionFailure() + << "Expected END order (which equals actual POST order): \n" + << ::testing::PrintToString(post_order) + << "\n Actual END order:\n" + << ::testing::PrintToString(end_order); + } + + const ::testing::AssertionResult result = + CheckEventOrdersForEachTask(events, task_count); + if (!result) + return result; + + return CheckNoTaskRunsOverlap(events); +} + +} // namespace internal + +} // namespace base diff --git a/chromium/base/test/sequenced_task_runner_test_template.h b/chromium/base/test/sequenced_task_runner_test_template.h new file mode 100644 index 00000000000..541ccae6727 --- /dev/null +++ b/chromium/base/test/sequenced_task_runner_test_template.h @@ -0,0 +1,350 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// SequencedTaskRunnerTest defines tests that implementations of +// SequencedTaskRunner should pass in order to be conformant. +// See task_runner_test_template.h for a description of how to use the +// constructs in this file; these work the same. 
+ +#ifndef BASE_TEST_SEQUENCED_TASK_RUNNER_TEST_TEMPLATE_H_ +#define BASE_TEST_SEQUENCED_TASK_RUNNER_TEST_TEMPLATE_H_ + +#include <cstddef> +#include <iosfwd> +#include <vector> + +#include "base/bind.h" +#include "base/callback.h" +#include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/sequenced_task_runner.h" +#include "base/synchronization/condition_variable.h" +#include "base/synchronization/lock.h" +#include "base/time/time.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +namespace internal { + +struct TaskEvent { + enum Type { POST, START, END }; + TaskEvent(int i, Type type); + int i; + Type type; +}; + +// Utility class used in the tests below. +class SequencedTaskTracker : public RefCountedThreadSafe<SequencedTaskTracker> { + public: + SequencedTaskTracker(); + + // Posts the non-nestable task |task|, and records its post event. + void PostWrappedNonNestableTask(SequencedTaskRunner* task_runner, + OnceClosure task); + + // Posts the nestable task |task|, and records its post event. + void PostWrappedNestableTask(SequencedTaskRunner* task_runner, + OnceClosure task); + + // Posts the delayed non-nestable task |task|, and records its post event. + void PostWrappedDelayedNonNestableTask(SequencedTaskRunner* task_runner, + OnceClosure task, + TimeDelta delay); + + // Posts |task_count| non-nestable tasks. + void PostNonNestableTasks(SequencedTaskRunner* task_runner, int task_count); + + const std::vector<TaskEvent>& GetTaskEvents() const; + + // Returns after the tracker observes a total of |count| task completions. + void WaitForCompletedTasks(int count); + + private: + friend class RefCountedThreadSafe<SequencedTaskTracker>; + + ~SequencedTaskTracker(); + + // A task which runs |task|, recording the start and end events. + void RunTask(OnceClosure task, int task_i); + + // Records a post event for task |i|. The owner is expected to be holding + // |lock_| (unlike |TaskStarted| and |TaskEnded|). + void TaskPosted(int i); + + // Records a start event for task |i|. + void TaskStarted(int i); + + // Records a end event for task |i|. + void TaskEnded(int i); + + // Protects events_, next_post_i_, task_end_count_ and task_end_cv_. + Lock lock_; + + // The events as they occurred for each task (protected by lock_). + std::vector<TaskEvent> events_; + + // The ordinal to be used for the next task-posting task (protected by + // lock_). + int next_post_i_; + + // The number of task end events we've received. + int task_end_count_; + ConditionVariable task_end_cv_; + + DISALLOW_COPY_AND_ASSIGN(SequencedTaskTracker); +}; + +void PrintTo(const TaskEvent& event, std::ostream* os); + +// Checks the non-nestable task invariants for all tasks in |events|. +// +// The invariants are: +// 1) Events started and ended in the same order that they were posted. +// 2) Events for an individual tasks occur in the order {POST, START, END}, +// and there is only one instance of each event type for a task. +// 3) The only events between a task's START and END events are the POSTs of +// other tasks. I.e. tasks were run sequentially, not interleaved. 
+::testing::AssertionResult CheckNonNestableInvariants( + const std::vector<TaskEvent>& events, + int task_count); + +} // namespace internal + +template <typename TaskRunnerTestDelegate> +class SequencedTaskRunnerTest : public testing::Test { + protected: + SequencedTaskRunnerTest() + : task_tracker_(new internal::SequencedTaskTracker()) {} + + const scoped_refptr<internal::SequencedTaskTracker> task_tracker_; + TaskRunnerTestDelegate delegate_; +}; + +TYPED_TEST_SUITE_P(SequencedTaskRunnerTest); + +// This test posts N non-nestable tasks in sequence, and expects them to run +// in FIFO order, with no part of any two tasks' execution +// overlapping. I.e. that each task starts only after the previously-posted +// one has finished. +TYPED_TEST_P(SequencedTaskRunnerTest, SequentialNonNestable) { + const int kTaskCount = 1000; + + this->delegate_.StartTaskRunner(); + const scoped_refptr<SequencedTaskRunner> task_runner = + this->delegate_.GetTaskRunner(); + + this->task_tracker_->PostWrappedNonNestableTask( + task_runner.get(), + BindOnce(&PlatformThread::Sleep, TimeDelta::FromSeconds(1))); + for (int i = 1; i < kTaskCount; ++i) { + this->task_tracker_->PostWrappedNonNestableTask(task_runner.get(), + OnceClosure()); + } + + this->delegate_.StopTaskRunner(); + + EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(), + kTaskCount)); +} + +// This test posts N nestable tasks in sequence. It has the same expectations +// as SequentialNonNestable because even though the tasks are nestable, they +// will not be run nestedly in this case. +TYPED_TEST_P(SequencedTaskRunnerTest, SequentialNestable) { + const int kTaskCount = 1000; + + this->delegate_.StartTaskRunner(); + const scoped_refptr<SequencedTaskRunner> task_runner = + this->delegate_.GetTaskRunner(); + + this->task_tracker_->PostWrappedNestableTask( + task_runner.get(), + BindOnce(&PlatformThread::Sleep, TimeDelta::FromSeconds(1))); + for (int i = 1; i < kTaskCount; ++i) { + this->task_tracker_->PostWrappedNestableTask(task_runner.get(), + OnceClosure()); + } + + this->delegate_.StopTaskRunner(); + + EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(), + kTaskCount)); +} + +// This test posts non-nestable tasks in order of increasing delay, and checks +// that that the tasks are run in FIFO order and that there is no execution +// overlap whatsoever between any two tasks. +TYPED_TEST_P(SequencedTaskRunnerTest, SequentialDelayedNonNestable) { + const int kTaskCount = 20; + const int kDelayIncrementMs = 50; + + this->delegate_.StartTaskRunner(); + const scoped_refptr<SequencedTaskRunner> task_runner = + this->delegate_.GetTaskRunner(); + + for (int i = 0; i < kTaskCount; ++i) { + this->task_tracker_->PostWrappedDelayedNonNestableTask( + task_runner.get(), OnceClosure(), + TimeDelta::FromMilliseconds(kDelayIncrementMs * i)); + } + + this->task_tracker_->WaitForCompletedTasks(kTaskCount); + this->delegate_.StopTaskRunner(); + + EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(), + kTaskCount)); +} + +// This test posts a fast, non-nestable task from within each of a number of +// slow, non-nestable tasks and checks that they all run in the sequence they +// were posted in and that there is no execution overlap whatsoever. 
+TYPED_TEST_P(SequencedTaskRunnerTest, NonNestablePostFromNonNestableTask) { + const int kParentCount = 10; + const int kChildrenPerParent = 10; + + this->delegate_.StartTaskRunner(); + const scoped_refptr<SequencedTaskRunner> task_runner = + this->delegate_.GetTaskRunner(); + + for (int i = 0; i < kParentCount; ++i) { + auto task = BindOnce(&internal::SequencedTaskTracker::PostNonNestableTasks, + this->task_tracker_, RetainedRef(task_runner), + kChildrenPerParent); + this->task_tracker_->PostWrappedNonNestableTask(task_runner.get(), + std::move(task)); + } + + this->delegate_.StopTaskRunner(); + + EXPECT_TRUE(CheckNonNestableInvariants( + this->task_tracker_->GetTaskEvents(), + kParentCount * (kChildrenPerParent + 1))); +} + +// This test posts two tasks with the same delay, and checks that the tasks are +// run in the order in which they were posted. +// +// NOTE: This is actually an approximate test since the API only takes a +// "delay" parameter, so we are not exactly simulating two tasks that get +// posted at the exact same time. It would be nice if the API allowed us to +// specify the desired run time. +TYPED_TEST_P(SequencedTaskRunnerTest, DelayedTasksSameDelay) { + const int kTaskCount = 2; + const TimeDelta kDelay = TimeDelta::FromMilliseconds(100); + + this->delegate_.StartTaskRunner(); + const scoped_refptr<SequencedTaskRunner> task_runner = + this->delegate_.GetTaskRunner(); + + this->task_tracker_->PostWrappedDelayedNonNestableTask(task_runner.get(), + OnceClosure(), kDelay); + this->task_tracker_->PostWrappedDelayedNonNestableTask(task_runner.get(), + OnceClosure(), kDelay); + this->task_tracker_->WaitForCompletedTasks(kTaskCount); + this->delegate_.StopTaskRunner(); + + EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(), + kTaskCount)); +} + +// This test posts a normal task and a delayed task, and checks that the +// delayed task runs after the normal task even if the normal task takes +// a long time to run. +TYPED_TEST_P(SequencedTaskRunnerTest, DelayedTaskAfterLongTask) { + const int kTaskCount = 2; + + this->delegate_.StartTaskRunner(); + const scoped_refptr<SequencedTaskRunner> task_runner = + this->delegate_.GetTaskRunner(); + + this->task_tracker_->PostWrappedNonNestableTask( + task_runner.get(), + base::BindOnce(&PlatformThread::Sleep, TimeDelta::FromMilliseconds(50))); + this->task_tracker_->PostWrappedDelayedNonNestableTask( + task_runner.get(), OnceClosure(), TimeDelta::FromMilliseconds(10)); + this->task_tracker_->WaitForCompletedTasks(kTaskCount); + this->delegate_.StopTaskRunner(); + + EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(), + kTaskCount)); +} + +// Test that a pile of normal tasks and a delayed task run in the +// time-to-run order. 
+TYPED_TEST_P(SequencedTaskRunnerTest, DelayedTaskAfterManyLongTasks) {
+  const int kTaskCount = 11;
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  for (int i = 0; i < kTaskCount - 1; i++) {
+    this->task_tracker_->PostWrappedNonNestableTask(
+        task_runner.get(), base::BindOnce(&PlatformThread::Sleep,
+                                          TimeDelta::FromMilliseconds(50)));
+  }
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(
+      task_runner.get(), OnceClosure(), TimeDelta::FromMilliseconds(10));
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+}
+
+// TODO(francoisk777@gmail.com) Add a test, similar to the above, which runs
+// some tasks nestedly (which should be implemented in the test
+// delegate). Also add, to the test delegate, a predicate which checks
+// whether the implementation supports nested tasks.
+
+// The SequencedTaskRunnerTest test case verifies behaviour that is expected
+// from a sequenced task runner in order to be conformant.
+REGISTER_TYPED_TEST_SUITE_P(SequencedTaskRunnerTest,
+                            SequentialNonNestable,
+                            SequentialNestable,
+                            SequentialDelayedNonNestable,
+                            NonNestablePostFromNonNestableTask,
+                            DelayedTasksSameDelay,
+                            DelayedTaskAfterLongTask,
+                            DelayedTaskAfterManyLongTasks);
+
+template <typename TaskRunnerTestDelegate>
+class SequencedTaskRunnerDelayedTest
+    : public SequencedTaskRunnerTest<TaskRunnerTestDelegate> {};
+
+TYPED_TEST_SUITE_P(SequencedTaskRunnerDelayedTest);
+
+// This test posts a delayed task, and checks that the task is run later than
+// the specified time.
+TYPED_TEST_P(SequencedTaskRunnerDelayedTest, DelayedTaskBasic) {
+  const int kTaskCount = 1;
+  const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+  this->delegate_.StartTaskRunner();
+  const scoped_refptr<SequencedTaskRunner> task_runner =
+      this->delegate_.GetTaskRunner();
+
+  Time time_before_run = Time::Now();
+  this->task_tracker_->PostWrappedDelayedNonNestableTask(task_runner.get(),
+                                                         OnceClosure(), kDelay);
+  this->task_tracker_->WaitForCompletedTasks(kTaskCount);
+  this->delegate_.StopTaskRunner();
+  Time time_after_run = Time::Now();
+
+  EXPECT_TRUE(CheckNonNestableInvariants(this->task_tracker_->GetTaskEvents(),
+                                         kTaskCount));
+  EXPECT_LE(kDelay, time_after_run - time_before_run);
+}
+
+// SequencedTaskRunnerDelayedTest tests that the |delay| parameter is used to
+// actually wait for |delay| ms before executing the task.
+// This is not mandatory for a SequencedTaskRunner to be compliant.
+REGISTER_TYPED_TEST_SUITE_P(SequencedTaskRunnerDelayedTest, DelayedTaskBasic);
+
+}  // namespace base
+
+#endif  // BASE_TEST_SEQUENCED_TASK_RUNNER_TEST_TEMPLATE_H_
diff --git a/chromium/base/test/simple_test_clock.cc b/chromium/base/test/simple_test_clock.cc
new file mode 100644
index 00000000000..7486d793581
--- /dev/null
+++ b/chromium/base/test/simple_test_clock.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
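[Usage sketch, not part of the patch] To make the conformance suite above concrete: a hypothetical delegate and typed-suite instantiation, following the pattern the header refers to in task_runner_test_template.h. The base::Thread-backed delegate shown here is an illustrative assumption, not something this patch adds.

#include "base/check.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequenced_task_runner.h"
#include "base/test/sequenced_task_runner_test_template.h"
#include "base/threading/thread.h"

namespace base {

// Hypothetical delegate: runs the posted tasks on a dedicated thread.
class ThreadSequencedTaskRunnerTestDelegate {
 public:
  ThreadSequencedTaskRunnerTestDelegate() : thread_("SequencedTestWorker") {}

  void StartTaskRunner() { CHECK(thread_.Start()); }
  scoped_refptr<SequencedTaskRunner> GetTaskRunner() {
    return thread_.task_runner();
  }
  void StopTaskRunner() { thread_.Stop(); }

 private:
  Thread thread_;
  DISALLOW_COPY_AND_ASSIGN(ThreadSequencedTaskRunnerTestDelegate);
};

// Instantiates the whole conformance suite for this delegate.
INSTANTIATE_TYPED_TEST_SUITE_P(ThreadBacked,
                               SequencedTaskRunnerTest,
                               ThreadSequencedTaskRunnerTestDelegate);

}  // namespace base

In real code the delegate would live in the unittest of the task runner being verified, and StopTaskRunner() may need to flush delayed work for the delayed-task tests to be meaningful.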
+ +#include "base/test/simple_test_clock.h" + +namespace base { + +SimpleTestClock::SimpleTestClock() = default; + +SimpleTestClock::~SimpleTestClock() = default; + +Time SimpleTestClock::Now() const { + AutoLock lock(lock_); + return now_; +} + +void SimpleTestClock::Advance(TimeDelta delta) { + AutoLock lock(lock_); + now_ += delta; +} + +void SimpleTestClock::SetNow(Time now) { + AutoLock lock(lock_); + now_ = now; +} + +} // namespace base diff --git a/chromium/base/test/simple_test_clock.h b/chromium/base/test/simple_test_clock.h new file mode 100644 index 00000000000..0cbcf082632 --- /dev/null +++ b/chromium/base/test/simple_test_clock.h @@ -0,0 +1,41 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SIMPLE_TEST_CLOCK_H_ +#define BASE_TEST_SIMPLE_TEST_CLOCK_H_ + +#include "base/compiler_specific.h" +#include "base/synchronization/lock.h" +#include "base/time/clock.h" +#include "base/time/time.h" + +namespace base { + +// SimpleTestClock is a Clock implementation that gives control over +// the returned Time objects. All methods may be called from any +// thread. +class SimpleTestClock : public Clock { + public: + // Starts off with a clock set to Time(). + SimpleTestClock(); + ~SimpleTestClock() override; + + Time Now() const override; + + // Advances the clock by |delta|. + void Advance(TimeDelta delta); + + // Sets the clock to the given time. + void SetNow(Time now); + + private: + // Protects |now_|. + mutable Lock lock_; + + Time now_; +}; + +} // namespace base + +#endif // BASE_TEST_SIMPLE_TEST_CLOCK_H_ diff --git a/chromium/base/test/simple_test_tick_clock.cc b/chromium/base/test/simple_test_tick_clock.cc new file mode 100644 index 00000000000..3efeaceeef5 --- /dev/null +++ b/chromium/base/test/simple_test_tick_clock.cc @@ -0,0 +1,31 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/simple_test_tick_clock.h" + +#include "base/check.h" + +namespace base { + +SimpleTestTickClock::SimpleTestTickClock() = default; + +SimpleTestTickClock::~SimpleTestTickClock() = default; + +TimeTicks SimpleTestTickClock::NowTicks() const { + AutoLock lock(lock_); + return now_ticks_; +} + +void SimpleTestTickClock::Advance(TimeDelta delta) { + AutoLock lock(lock_); + DCHECK(delta >= TimeDelta()); + now_ticks_ += delta; +} + +void SimpleTestTickClock::SetNowTicks(TimeTicks ticks) { + AutoLock lock(lock_); + now_ticks_ = ticks; +} + +} // namespace base diff --git a/chromium/base/test/simple_test_tick_clock.h b/chromium/base/test/simple_test_tick_clock.h new file mode 100644 index 00000000000..923eba4a9af --- /dev/null +++ b/chromium/base/test/simple_test_tick_clock.h @@ -0,0 +1,41 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_ +#define BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_ + +#include "base/compiler_specific.h" +#include "base/synchronization/lock.h" +#include "base/time/tick_clock.h" +#include "base/time/time.h" + +namespace base { + +// SimpleTestTickClock is a TickClock implementation that gives +// control over the returned TimeTicks objects. All methods may be +// called from any thread. 
+class SimpleTestTickClock : public TickClock {
+ public:
+  // Starts off with a clock set to TimeTicks().
+  SimpleTestTickClock();
+  ~SimpleTestTickClock() override;
+
+  TimeTicks NowTicks() const override;
+
+  // Advances the clock by |delta|, which must not be negative.
+  void Advance(TimeDelta delta);
+
+  // Sets the clock to the given time.
+  void SetNowTicks(TimeTicks ticks);
+
+ private:
+  // Protects |now_ticks_|.
+  mutable Lock lock_;
+
+  TimeTicks now_ticks_;
+};
+
+}  // namespace base
+
+#endif  // BASE_TEST_SIMPLE_TEST_TICK_CLOCK_H_
diff --git a/chromium/base/test/spin_wait.h b/chromium/base/test/spin_wait.h
new file mode 100644
index 00000000000..42b3b3510f1
--- /dev/null
+++ b/chromium/base/test/spin_wait.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file provides a macro ONLY for use in testing.
+// DO NOT USE IN PRODUCTION CODE. There are much better ways to wait.
+
+// This code is very helpful in testing multi-threaded code, without depending
+// on almost any primitives. This is especially helpful if you are testing
+// those primitive multi-threaded constructs.
+
+// We provide a simple one argument spin wait (for 1 second), and a generic
+// spin wait (for longer periods of time).
+
+#ifndef BASE_TEST_SPIN_WAIT_H_
+#define BASE_TEST_SPIN_WAIT_H_
+
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+
+// Provide a macro that will wait no longer than 1 second for an asynchronous
+// change in the value of an expression.
+// A typical use would be:
+//
+//   SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(0 == f(x));
+//
+// The expression will be evaluated repeatedly until it is true, or until
+// the time (1 second) expires.
+// Since tests generally have a 5 second watchdog timer, this spin loop is
+// typically used to get the padding needed on a given test platform to assure
+// that the test passes, even if load varies and external events vary.
+
+#define SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(expression)                 \
+  SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(base::TimeDelta::FromSeconds(1), \
+                                   (expression))
+
+#define SPIN_FOR_TIMEDELTA_OR_UNTIL_TRUE(delta, expression)                   \
+  do {                                                                        \
+    base::TimeTicks spin_wait_start = base::TimeTicks::Now();                 \
+    const base::TimeDelta kSpinWaitTimeout = delta;                           \
+    while (!(expression)) {                                                   \
+      if (kSpinWaitTimeout < base::TimeTicks::Now() - spin_wait_start) {      \
+        EXPECT_LE((base::TimeTicks::Now() - spin_wait_start).InMilliseconds(),\
+                  kSpinWaitTimeout.InMilliseconds())                          \
+            << "Timed out";                                                   \
+        break;                                                                \
+      }                                                                       \
+      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(50));     \
+    }                                                                         \
+  } while (0)
+
+#endif  // BASE_TEST_SPIN_WAIT_H_
diff --git a/chromium/base/test/task_environment.cc b/chromium/base/test/task_environment.cc
new file mode 100644
index 00000000000..e8aa5d502a5
--- /dev/null
+++ b/chromium/base/test/task_environment.cc
@@ -0,0 +1,808 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
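[Usage sketch, not part of the patch] A short illustration of the intended injection pattern for the simple test clocks above; the Throttle class is hypothetical and exists only to show swapping in SimpleTestTickClock for an injected TickClock.

#include "base/test/simple_test_tick_clock.h"
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical class under test that rate-limits work via an injected clock.
class Throttle {
 public:
  explicit Throttle(const base::TickClock* clock) : clock_(clock) {}

  bool CanRun() {
    base::TimeTicks now = clock_->NowTicks();
    if (now - last_run_ < base::TimeDelta::FromSeconds(10))
      return false;
    last_run_ = now;
    return true;
  }

 private:
  const base::TickClock* clock_;
  base::TimeTicks last_run_;
};

TEST(ThrottleExampleTest, HonorsCooldown) {
  base::SimpleTestTickClock clock;
  clock.Advance(base::TimeDelta::FromSeconds(100));  // Move away from zero.
  Throttle throttle(&clock);

  EXPECT_TRUE(throttle.CanRun());
  EXPECT_FALSE(throttle.CanRun());
  clock.Advance(base::TimeDelta::FromSeconds(10));
  EXPECT_TRUE(throttle.CanRun());
}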
+ +#include "base/test/task_environment.h" + +#include <algorithm> +#include <memory> + +#include "base/bind_helpers.h" +#include "base/lazy_instance.h" +#include "base/location.h" +#include "base/logging.h" +#include "base/memory/ptr_util.h" +#include "base/message_loop/message_pump.h" +#include "base/message_loop/message_pump_type.h" +#include "base/no_destructor.h" +#include "base/run_loop.h" +#include "base/synchronization/condition_variable.h" +#include "base/synchronization/lock.h" +#include "base/task/post_task.h" +#include "base/task/sequence_manager/sequence_manager_impl.h" +#include "base/task/sequence_manager/time_domain.h" +#include "base/task/simple_task_executor.h" +#include "base/task/thread_pool/thread_pool_impl.h" +#include "base/task/thread_pool/thread_pool_instance.h" +#include "base/test/bind_test_util.h" +#include "base/test/test_mock_time_task_runner.h" +#include "base/test/test_timeouts.h" +#include "base/thread_annotations.h" +#include "base/threading/sequence_local_storage_map.h" +#include "base/threading/thread_local.h" +#include "base/threading/thread_restrictions.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/time/clock.h" +#include "base/time/tick_clock.h" +#include "base/time/time.h" +#include "base/time/time_override.h" +#include "testing/gtest/include/gtest/gtest.h" + +#if defined(OS_POSIX) || defined(OS_FUCHSIA) +#include "base/files/file_descriptor_watcher_posix.h" +#endif + +namespace base { +namespace test { + +namespace { + +ObserverList<TaskEnvironment::DestructionObserver>& GetDestructionObservers() { + static NoDestructor<ObserverList<TaskEnvironment::DestructionObserver>> + instance; + return *instance; +} + +base::MessagePumpType GetMessagePumpTypeForMainThreadType( + TaskEnvironment::MainThreadType main_thread_type) { + switch (main_thread_type) { + case TaskEnvironment::MainThreadType::DEFAULT: + return MessagePumpType::DEFAULT; + case TaskEnvironment::MainThreadType::UI: + return MessagePumpType::UI; + case TaskEnvironment::MainThreadType::IO: + return MessagePumpType::IO; + } + NOTREACHED(); + return MessagePumpType::DEFAULT; +} + +std::unique_ptr<sequence_manager::SequenceManager> +CreateSequenceManagerForMainThreadType( + TaskEnvironment::MainThreadType main_thread_type) { + auto type = GetMessagePumpTypeForMainThreadType(main_thread_type); + return sequence_manager::CreateSequenceManagerOnCurrentThreadWithPump( + MessagePump::Create(type), + base::sequence_manager::SequenceManager::Settings::Builder() + .SetMessagePumpType(type) + .Build()); +} + +class TickClockBasedClock : public Clock { + public: + explicit TickClockBasedClock(const TickClock* tick_clock) + : tick_clock_(*tick_clock), + start_ticks_(tick_clock_.NowTicks()), + start_time_(Time::UnixEpoch()) {} + + Time Now() const override { + return start_time_ + (tick_clock_.NowTicks() - start_ticks_); + } + + private: + const TickClock& tick_clock_; + const TimeTicks start_ticks_; + const Time start_time_; +}; + +} // namespace + +class TaskEnvironment::TestTaskTracker + : public internal::ThreadPoolImpl::TaskTrackerImpl { + public: + TestTaskTracker(); + + // Allow running tasks. Returns whether tasks were previously allowed to run. + bool AllowRunTasks(); + + // Disallow running tasks. Returns true on success; success requires there to + // be no tasks currently running. Returns false if >0 tasks are currently + // running. 
Prior to returning false, it will attempt to block until at least + // one task has completed (in an attempt to avoid callers busy-looping + // DisallowRunTasks() calls with the same set of slowly ongoing tasks). This + // block attempt will also have a short timeout (in an attempt to prevent the + // fallout of blocking: if the only task remaining is blocked on the main + // thread, waiting for it to complete results in a deadlock...). + bool DisallowRunTasks(); + + // Returns true if tasks are currently allowed to run. + bool TasksAllowedToRun() const; + + private: + friend class TaskEnvironment; + + // internal::ThreadPoolImpl::TaskTrackerImpl: + void RunTask(internal::Task task, + internal::TaskSource* sequence, + const TaskTraits& traits) override; + + // Synchronizes accesses to members below. + mutable Lock lock_; + + // True if running tasks is allowed. + bool can_run_tasks_ GUARDED_BY(lock_) = true; + + // Signaled when |can_run_tasks_| becomes true. + ConditionVariable can_run_tasks_cv_ GUARDED_BY(lock_); + + // Signaled when a task is completed. + ConditionVariable task_completed_cv_ GUARDED_BY(lock_); + + // Number of tasks that are currently running. + int num_tasks_running_ GUARDED_BY(lock_) = 0; + + DISALLOW_COPY_AND_ASSIGN(TestTaskTracker); +}; + +class TaskEnvironment::MockTimeDomain : public sequence_manager::TimeDomain, + public TickClock { + public: + explicit MockTimeDomain(sequence_manager::SequenceManager* sequence_manager) + : sequence_manager_(sequence_manager) { + DCHECK_EQ(nullptr, current_mock_time_domain_); + current_mock_time_domain_ = this; + } + + ~MockTimeDomain() override { + DCHECK_EQ(this, current_mock_time_domain_); + current_mock_time_domain_ = nullptr; + } + + static MockTimeDomain* current_mock_time_domain_; + + static Time GetTime() { + return Time::UnixEpoch() + (current_mock_time_domain_->Now() - TimeTicks()); + } + + static TimeTicks GetTimeTicks() { return current_mock_time_domain_->Now(); } + + using TimeDomain::NextScheduledRunTime; + + Optional<TimeTicks> NextScheduledRunTime() const { + // The TimeDomain doesn't know about immediate tasks, check if we have any. + if (!sequence_manager_->IsIdleForTesting()) + return Now(); + return TimeDomain::NextScheduledRunTime(); + } + + void AdvanceClock(TimeDelta delta) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + { + AutoLock lock(now_ticks_lock_); + now_ticks_ += delta; + } + if (thread_pool_) + thread_pool_->ProcessRipeDelayedTasksForTesting(); + } + + static std::unique_ptr<TaskEnvironment::MockTimeDomain> CreateAndRegister( + sequence_manager::SequenceManager* sequence_manager) { + auto mock_time_domain = + std::make_unique<TaskEnvironment::MockTimeDomain>(sequence_manager); + sequence_manager->RegisterTimeDomain(mock_time_domain.get()); + return mock_time_domain; + } + + void SetThreadPool(internal::ThreadPoolImpl* thread_pool, + const TestTaskTracker* thread_pool_task_tracker) { + DCHECK(!thread_pool_); + DCHECK(!thread_pool_task_tracker_); + thread_pool_ = thread_pool; + thread_pool_task_tracker_ = thread_pool_task_tracker; + } + + // sequence_manager::TimeDomain: + + sequence_manager::LazyNow CreateLazyNow() const override { + AutoLock lock(now_ticks_lock_); + return sequence_manager::LazyNow(now_ticks_); + } + + TimeTicks Now() const override { + // This can be called from any thread. 
+ AutoLock lock(now_ticks_lock_); + return now_ticks_; + } + + Optional<TimeDelta> DelayTillNextTask( + sequence_manager::LazyNow* lazy_now) override { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + // Make sure TimeDomain::NextScheduledRunTime has taken canceled tasks into + // account, ReclaimMemory sweeps canceled delayed tasks. + sequence_manager()->ReclaimMemory(); + Optional<TimeTicks> run_time = NextScheduledRunTime(); + // Check if we've run out of tasks. + if (!run_time) + return base::nullopt; + + // Check if we have a task that should be running now. Reading |now_ticks_| + // from the main thread doesn't require the lock. + if (run_time <= TS_UNCHECKED_READ(now_ticks_)) + return base::TimeDelta(); + + // The next task is a future delayed task. Since we're using mock time, we + // don't want an actual OS level delayed wake up scheduled, so pretend we + // have no more work. This will result in appearing idle, TaskEnvironment + // will decide what to do based on that (return to caller or fast-forward + // time). + return base::nullopt; + } + + // This method is called when the underlying message pump has run out of + // non-delayed work. Advances time to the next task unless + // |quit_when_idle_requested| or TaskEnvironment controls mock time. + bool MaybeFastForwardToNextTask(bool quit_when_idle_requested) override { + if (quit_when_idle_requested) + return false; + + return FastForwardToNextTaskOrCap(TimeTicks::Max()) == + NextTaskSource::kMainThread; + } + + const char* GetName() const override { return "MockTimeDomain"; } + + // TickClock implementation: + TimeTicks NowTicks() const override { return Now(); } + + // Used by FastForwardToNextTaskOrCap() to return which task source time was + // advanced to. + enum class NextTaskSource { + // Out of tasks under |fast_forward_cap|. + kNone, + // There's now >=1 immediate task on the main thread. + kMainThread, + // There's now >=1 immediate task in the thread pool. + kThreadPool, + }; + + // Advances time to the first of : next main thread task, next thread pool + // task, or |fast_forward_cap| (if it's not Max()). + NextTaskSource FastForwardToNextTaskOrCap(TimeTicks fast_forward_cap) { + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + // We don't need to call ReclaimMemory here because + // DelayTillNextTask will have dealt with cancelled delayed tasks for us. + Optional<TimeTicks> next_main_thread_task_time = NextScheduledRunTime(); + + // Consider the next thread pool tasks iff they're running. + Optional<TimeTicks> next_thread_pool_task_time; + if (thread_pool_ && thread_pool_task_tracker_->TasksAllowedToRun()) { + next_thread_pool_task_time = + thread_pool_->NextScheduledRunTimeForTesting(); + } + + // Custom comparison logic to consider nullopt the largest rather than + // smallest value. Could consider using TimeTicks::Max() instead of nullopt + // to represent out-of-tasks? 
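  // [Editorial sketch, not part of this patch] The nullopt-aware comparison
  // performed by the if/else chain below, written as a hypothetical
  // stand-alone helper (an absent run time means "no pending task", i.e. it
  // behaves like +infinity and never wins the comparison):
  //
  //   Optional<TimeTicks> EarlierRunTime(Optional<TimeTicks> a,
  //                                      Optional<TimeTicks> b) {
  //     if (!a)
  //       return b;  // Only |b| (possibly also nullopt) has a task.
  //     if (!b)
  //       return a;  // Only |a| has a task.
  //     return std::min(*a, *b);
  //   }
  //
  // Using TimeTicks::Max() instead of nullopt, as the comment above suggests,
  // would let std::min() be used directly.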
+ Optional<TimeTicks> next_task_time; + if (!next_main_thread_task_time) { + next_task_time = next_thread_pool_task_time; + } else if (!next_thread_pool_task_time) { + next_task_time = next_main_thread_task_time; + } else { + next_task_time = + std::min(*next_main_thread_task_time, *next_thread_pool_task_time); + } + + if (next_task_time && *next_task_time <= fast_forward_cap) { + { + AutoLock lock(now_ticks_lock_); + // It's possible for |next_task_time| to be in the past in the following + // scenario: + // Start with Now() == 100ms + // Thread A : Post 200ms delayed task T (construct and enqueue) + // Thread B : Construct 20ms delayed task U + // => |delayed_run_time| == 120ms. + // Thread A : FastForwardToNextTaskOrCap() => fast-forwards to T @ + // 300ms (task U is not yet in queue). + // Thread B : Complete enqueue of task U. + // Thread A : FastForwardToNextTaskOrCap() => must stay at 300ms and run + // U, not go back to 120ms. + // Hence we need std::max() to protect again this because construction + // and enqueuing isn't atomic in time (LazyNow support in + // base/task/thread_pool could help). + now_ticks_ = std::max(now_ticks_, *next_task_time); + } + + if (next_task_time == next_thread_pool_task_time) { + // Let the thread pool know that it should post its now ripe delayed + // tasks. + thread_pool_->ProcessRipeDelayedTasksForTesting(); + return NextTaskSource::kThreadPool; + } + return NextTaskSource::kMainThread; + } + + if (!fast_forward_cap.is_max()) { + AutoLock lock(now_ticks_lock_); + // It's possible that Now() is already beyond |fast_forward_cap| when the + // caller nests multiple FastForwardBy() calls. + now_ticks_ = std::max(now_ticks_, fast_forward_cap); + } + + return NextTaskSource::kNone; + } + + private: + SEQUENCE_CHECKER(sequence_checker_); + + sequence_manager::SequenceManager* const sequence_manager_; + + internal::ThreadPoolImpl* thread_pool_ = nullptr; + const TestTaskTracker* thread_pool_task_tracker_ = nullptr; + + // Protects |now_ticks_| + mutable Lock now_ticks_lock_; + + // Only ever written to from the main sequence. Start from real Now() instead + // of zero to give a more realistic view to tests. + TimeTicks now_ticks_ GUARDED_BY(now_ticks_lock_){ + base::subtle::TimeTicksNowIgnoringOverride()}; +}; + +TaskEnvironment::MockTimeDomain* + TaskEnvironment::MockTimeDomain::current_mock_time_domain_ = nullptr; + +TaskEnvironment::TaskEnvironment( + TimeSource time_source, + MainThreadType main_thread_type, + ThreadPoolExecutionMode thread_pool_execution_mode, + ThreadingMode threading_mode, + ThreadPoolCOMEnvironment thread_pool_com_environment, + bool subclass_creates_default_taskrunner, + trait_helpers::NotATraitTag) + : main_thread_type_(main_thread_type), + thread_pool_execution_mode_(thread_pool_execution_mode), + threading_mode_(threading_mode), + thread_pool_com_environment_(thread_pool_com_environment), + subclass_creates_default_taskrunner_(subclass_creates_default_taskrunner), + sequence_manager_( + CreateSequenceManagerForMainThreadType(main_thread_type)), + mock_time_domain_( + time_source != TimeSource::SYSTEM_TIME + ? MockTimeDomain::CreateAndRegister(sequence_manager_.get()) + : nullptr), + time_overrides_(time_source == TimeSource::MOCK_TIME + ? std::make_unique<subtle::ScopedTimeClockOverrides>( + &MockTimeDomain::GetTime, + &MockTimeDomain::GetTimeTicks, + nullptr) + : nullptr), + mock_clock_(mock_time_domain_ ? 
std::make_unique<TickClockBasedClock>( + mock_time_domain_.get()) + : nullptr), + scoped_lazy_task_runner_list_for_testing_( + std::make_unique<internal::ScopedLazyTaskRunnerListForTesting>()), + // TODO(https://crbug.com/922098): Enable Run() timeouts even for + // instances created with TimeSource::MOCK_TIME. + run_loop_timeout_( + mock_time_domain_ + ? nullptr + : std::make_unique<ScopedRunLoopTimeout>( + FROM_HERE, + TestTimeouts::action_timeout(), + BindRepeating(&sequence_manager::SequenceManager:: + DescribeAllPendingTasks, + Unretained(sequence_manager_.get())))) { + CHECK(!base::ThreadTaskRunnerHandle::IsSet()); + // If |subclass_creates_default_taskrunner| is true then initialization is + // deferred until DeferredInitFromSubclass(). + if (!subclass_creates_default_taskrunner) { + task_queue_ = sequence_manager_->CreateTaskQueue( + sequence_manager::TaskQueue::Spec("task_environment_default") + .SetTimeDomain(mock_time_domain_.get())); + task_runner_ = task_queue_->task_runner(); + sequence_manager_->SetDefaultTaskRunner(task_runner_); + simple_task_executor_ = std::make_unique<SimpleTaskExecutor>(task_runner_); + CHECK(base::ThreadTaskRunnerHandle::IsSet()) + << "ThreadTaskRunnerHandle should've been set now."; + CompleteInitialization(); + } + + if (threading_mode_ != ThreadingMode::MAIN_THREAD_ONLY) + InitializeThreadPool(); + + if (thread_pool_execution_mode_ == ThreadPoolExecutionMode::QUEUED && + task_tracker_) { + CHECK(task_tracker_->DisallowRunTasks()); + } +} + +void TaskEnvironment::InitializeThreadPool() { + CHECK(!ThreadPoolInstance::Get()) + << "Someone has already installed a ThreadPoolInstance. If nothing in " + "your test does so, then a test that ran earlier may have installed " + "one and leaked it. base::TestSuite will trap leaked globals, unless " + "someone has explicitly disabled it with " + "DisableCheckForLeakedGlobals()."; + + ThreadPoolInstance::InitParams init_params(kNumForegroundThreadPoolThreads); + init_params.suggested_reclaim_time = TimeDelta::Max(); +#if defined(OS_WIN) + if (thread_pool_com_environment_ == ThreadPoolCOMEnvironment::COM_MTA) { + init_params.common_thread_pool_environment = + ThreadPoolInstance::InitParams::CommonThreadPoolEnvironment::COM_MTA; + } +#endif + + auto task_tracker = std::make_unique<TestTaskTracker>(); + task_tracker_ = task_tracker.get(); + auto thread_pool = std::make_unique<internal::ThreadPoolImpl>( + std::string(), std::move(task_tracker)); + if (mock_time_domain_) + mock_time_domain_->SetThreadPool(thread_pool.get(), task_tracker_); + ThreadPoolInstance::Set(std::move(thread_pool)); + ThreadPoolInstance::Get()->Start(init_params); +} + +void TaskEnvironment::CompleteInitialization() { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + +#if defined(OS_POSIX) || defined(OS_FUCHSIA) + if (main_thread_type() == MainThreadType::IO) { + file_descriptor_watcher_ = + std::make_unique<FileDescriptorWatcher>(GetMainThreadTaskRunner()); + } +#endif // defined(OS_POSIX) || defined(OS_FUCHSIA) +} + +TaskEnvironment::TaskEnvironment(TaskEnvironment&& other) = default; + +TaskEnvironment::~TaskEnvironment() { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + // If we've been moved then bail out. 
+ if (!owns_instance_) + return; + for (auto& observer : GetDestructionObservers()) + observer.WillDestroyCurrentTaskEnvironment(); + DestroyThreadPool(); + task_queue_ = nullptr; + NotifyDestructionObserversAndReleaseSequenceManager(); +} + +void TaskEnvironment::DestroyThreadPool() { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + if (threading_mode_ == ThreadingMode::MAIN_THREAD_ONLY) + return; + + // Ideally this would RunLoop().RunUntilIdle() here to catch any errors or + // infinite post loop in the remaining work but this isn't possible right now + // because base::~MessageLoop() didn't use to do this and adding it here would + // make the migration away from MessageLoop that much harder. + + // Without FlushForTesting(), DeleteSoon() and ReleaseSoon() tasks could be + // skipped, resulting in memory leaks. + task_tracker_->AllowRunTasks(); + ThreadPoolInstance::Get()->FlushForTesting(); + ThreadPoolInstance::Get()->Shutdown(); + ThreadPoolInstance::Get()->JoinForTesting(); + // Destroying ThreadPoolInstance state can result in waiting on worker + // threads. Make sure this is allowed to avoid flaking tests that have + // disallowed waits on their main thread. + ScopedAllowBaseSyncPrimitivesForTesting allow_waits_to_destroy_task_tracker; + ThreadPoolInstance::Set(nullptr); +} + +sequence_manager::TimeDomain* TaskEnvironment::GetTimeDomain() const { + return mock_time_domain_ ? mock_time_domain_.get() + : sequence_manager_->GetRealTimeDomain(); +} + +sequence_manager::SequenceManager* TaskEnvironment::sequence_manager() const { + DCHECK(subclass_creates_default_taskrunner_); + return sequence_manager_.get(); +} + +void TaskEnvironment::DeferredInitFromSubclass( + scoped_refptr<base::SingleThreadTaskRunner> task_runner) { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + task_runner_ = std::move(task_runner); + sequence_manager_->SetDefaultTaskRunner(task_runner_); + CompleteInitialization(); +} + +void TaskEnvironment::NotifyDestructionObserversAndReleaseSequenceManager() { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + // A derived classes may call this method early. + if (!sequence_manager_) + return; + + if (mock_time_domain_) + sequence_manager_->UnregisterTimeDomain(mock_time_domain_.get()); + + sequence_manager_.reset(); +} + +scoped_refptr<base::SingleThreadTaskRunner> +TaskEnvironment::GetMainThreadTaskRunner() { + DCHECK(task_runner_); + return task_runner_; +} + +bool TaskEnvironment::MainThreadIsIdle() const { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + sequence_manager::internal::SequenceManagerImpl* sequence_manager_impl = + static_cast<sequence_manager::internal::SequenceManagerImpl*>( + sequence_manager_.get()); + // ReclaimMemory sweeps canceled delayed tasks. + sequence_manager_impl->ReclaimMemory(); + return sequence_manager_impl->IsIdleForTesting(); +} + +void TaskEnvironment::RunUntilIdle() { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + if (threading_mode_ == ThreadingMode::MAIN_THREAD_ONLY) { + RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle(); + return; + } + + // TODO(gab): This can be heavily simplified to essentially: + // bool HasMainThreadTasks() { + // if (message_loop_) + // return !message_loop_->IsIdleForTesting(); + // return mock_time_task_runner_->NextPendingTaskDelay().is_zero(); + // } + // while (task_tracker_->HasIncompleteTasks() || HasMainThreadTasks()) { + // base::RunLoop().RunUntilIdle(); + // // Avoid busy-looping. 
+ // if (task_tracker_->HasIncompleteTasks()) + // PlatformThread::Sleep(TimeDelta::FromMilliSeconds(1)); + // } + // Update: This can likely be done now that MessageLoop::IsIdleForTesting() + // checks all queues. + // + // Other than that it works because once |task_tracker_->HasIncompleteTasks()| + // is false we know for sure that the only thing that can make it true is a + // main thread task (TaskEnvironment owns all the threads). As such we can't + // racily see it as false on the main thread and be wrong as if it the main + // thread sees the atomic count at zero, it's the only one that can make it go + // up. And the only thing that can make it go up on the main thread are main + // thread tasks and therefore we're done if there aren't any left. + // + // This simplification further allows simplification of DisallowRunTasks(). + // + // This can also be simplified even further once TaskTracker becomes directly + // aware of main thread tasks. https://crbug.com/660078. + + const bool could_run_tasks = task_tracker_->AllowRunTasks(); + + for (;;) { + task_tracker_->AllowRunTasks(); + + // First run as many tasks as possible on the main thread in parallel with + // tasks in ThreadPool. This increases likelihood of TSAN catching + // threading errors and eliminates possibility of hangs should a + // ThreadPool task synchronously block on a main thread task + // (ThreadPoolInstance::FlushForTesting() can't be used here for that + // reason). + RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle(); + + // Then halt ThreadPool. DisallowRunTasks() failing indicates that there + // were ThreadPool tasks currently running. In that case, try again from + // top when DisallowRunTasks() yields control back to this thread as they + // may have posted main thread tasks. + if (!task_tracker_->DisallowRunTasks()) + continue; + + // Once ThreadPool is halted. Run any remaining main thread tasks (which + // may have been posted by ThreadPool tasks that completed between the + // above main thread RunUntilIdle() and ThreadPool DisallowRunTasks()). + // Note: this assumes that no main thread task synchronously blocks on a + // ThreadPool tasks (it certainly shouldn't); this call could otherwise + // hang. + RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle(); + + // The above RunUntilIdle() guarantees there are no remaining main thread + // tasks (the ThreadPool being halted during the last RunUntilIdle() is + // key as it prevents a task being posted to it racily with it determining + // it had no work remaining). Therefore, we're done if there is no more work + // on ThreadPool either (there can be ThreadPool work remaining if + // DisallowRunTasks() preempted work and/or the last RunUntilIdle() posted + // more ThreadPool tasks). + // Note: this last |if| couldn't be turned into a |do {} while();|. A + // conditional loop makes it such that |continue;| results in checking the + // condition (not unconditionally loop again) which would be incorrect for + // the above logic as it'd then be possible for a ThreadPool task to be + // running during the DisallowRunTasks() test, causing it to fail, but then + // post to the main thread and complete before the loop's condition is + // verified which could result in HasIncompleteUndelayedTasksForTesting() + // returning false and the loop erroneously exiting with a pending task on + // the main thread. 
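    // [Editorial sketch, not part of this patch] The enclosing loop, reduced
    // to pseudo-code, alternates between the two task sources until both are
    // observed empty while the ThreadPool is halted:
    //
    //   for (;;) {
    //     AllowRunTasks();            // ThreadPool runs in parallel.
    //     RunMainThreadUntilIdle();   // Drain the main thread.
    //     if (!DisallowRunTasks())
    //       continue;                 // ThreadPool still busy; start over.
    //     RunMainThreadUntilIdle();   // Drain tasks posted while halting.
    //     if (!ThreadPoolHasWork())
    //       break;                    // Both sources empty: done.
    //   }
    //
    // The names above are placeholders for the RunLoop/TestTaskTracker calls
    // used in this function.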
+ if (!task_tracker_->HasIncompleteTaskSourcesForTesting()) + break; + } + + // The above loop always ends with running tasks being disallowed. Re-enable + // parallel execution before returning if it was allowed at the beginning of + // this call. + if (could_run_tasks) + task_tracker_->AllowRunTasks(); +} + +void TaskEnvironment::FastForwardBy(TimeDelta delta) { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + DCHECK(mock_time_domain_); + DCHECK_GE(delta, TimeDelta()); + + const bool could_run_tasks = task_tracker_ && task_tracker_->AllowRunTasks(); + + const TimeTicks fast_forward_until = mock_time_domain_->NowTicks() + delta; + do { + RunUntilIdle(); + } while (mock_time_domain_->FastForwardToNextTaskOrCap(fast_forward_until) != + MockTimeDomain::NextTaskSource::kNone); + + if (task_tracker_ && !could_run_tasks) + task_tracker_->DisallowRunTasks(); +} + +void TaskEnvironment::FastForwardUntilNoTasksRemain() { + // TimeTicks::operator+(TimeDelta) uses saturated arithmetic so it's safe to + // pass in TimeDelta::Max(). + FastForwardBy(TimeDelta::Max()); +} + +void TaskEnvironment::AdvanceClock(TimeDelta delta) { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + DCHECK(mock_time_domain_); + DCHECK_GE(delta, TimeDelta()); + mock_time_domain_->AdvanceClock(delta); +} + +const TickClock* TaskEnvironment::GetMockTickClock() const { + DCHECK(mock_time_domain_); + return mock_time_domain_.get(); +} + +base::TimeTicks TaskEnvironment::NowTicks() const { + DCHECK(mock_time_domain_); + return mock_time_domain_->Now(); +} + +const Clock* TaskEnvironment::GetMockClock() const { + DCHECK(mock_clock_); + return mock_clock_.get(); +} + +size_t TaskEnvironment::GetPendingMainThreadTaskCount() const { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + // ReclaimMemory sweeps canceled delayed tasks. + sequence_manager_->ReclaimMemory(); + return sequence_manager_->GetPendingTaskCountForTesting(); +} + +TimeDelta TaskEnvironment::NextMainThreadPendingTaskDelay() const { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + // ReclaimMemory sweeps canceled delayed tasks. + sequence_manager_->ReclaimMemory(); + DCHECK(mock_time_domain_); + Optional<TimeTicks> run_time = mock_time_domain_->NextScheduledRunTime(); + if (run_time) + return *run_time - mock_time_domain_->Now(); + return TimeDelta::Max(); +} + +bool TaskEnvironment::NextTaskIsDelayed() const { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + + TimeDelta delay = NextMainThreadPendingTaskDelay(); + return !delay.is_zero() && !delay.is_max(); +} + +void TaskEnvironment::DescribePendingMainThreadTasks() const { + DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_); + LOG(INFO) << sequence_manager_->DescribeAllPendingTasks(); +} + +// static +void TaskEnvironment::AddDestructionObserver(DestructionObserver* observer) { + GetDestructionObservers().AddObserver(observer); +} + +// static +void TaskEnvironment::RemoveDestructionObserver(DestructionObserver* observer) { + GetDestructionObservers().RemoveObserver(observer); +} + +TaskEnvironment::TestTaskTracker::TestTaskTracker() + : internal::ThreadPoolImpl::TaskTrackerImpl(std::string()), + can_run_tasks_cv_(&lock_), + task_completed_cv_(&lock_) { + // Consider threads blocked on these as idle (avoids instantiating + // ScopedBlockingCalls and confusing some //base internals tests). 
+ can_run_tasks_cv_.declare_only_used_while_idle(); + task_completed_cv_.declare_only_used_while_idle(); +} + +bool TaskEnvironment::TestTaskTracker::AllowRunTasks() { + AutoLock auto_lock(lock_); + const bool could_run_tasks = can_run_tasks_; + can_run_tasks_ = true; + can_run_tasks_cv_.Broadcast(); + return could_run_tasks; +} + +bool TaskEnvironment::TestTaskTracker::TasksAllowedToRun() const { + AutoLock auto_lock(lock_); + return can_run_tasks_; +} + +bool TaskEnvironment::TestTaskTracker::DisallowRunTasks() { + AutoLock auto_lock(lock_); + + // Can't disallow run task if there are tasks running. + if (num_tasks_running_ > 0) { + // Attempt to wait a bit so that the caller doesn't busy-loop with the same + // set of pending work. A short wait is required to avoid deadlock + // scenarios. See DisallowRunTasks()'s declaration for more details. + task_completed_cv_.TimedWait(TimeDelta::FromMilliseconds(1)); + return false; + } + + can_run_tasks_ = false; + return true; +} + +void TaskEnvironment::TestTaskTracker::RunTask(internal::Task task, + internal::TaskSource* sequence, + const TaskTraits& traits) { + { + AutoLock auto_lock(lock_); + + while (!can_run_tasks_) + can_run_tasks_cv_.Wait(); + + ++num_tasks_running_; + } + + { + // Using TimeTicksNowIgnoringOverride() because in tests that mock time, + // Now() can advance very far very fast, and that's not a problem. This is + // watching for tests that have actually long running tasks which cause our + // test suites to run slowly. + base::TimeTicks before = base::subtle::TimeTicksNowIgnoringOverride(); + internal::ThreadPoolImpl::TaskTrackerImpl::RunTask(std::move(task), + sequence, traits); + base::TimeTicks after = base::subtle::TimeTicksNowIgnoringOverride(); + + if ((after - before) > TestTimeouts::action_max_timeout()) { + ADD_FAILURE() << "TaskEnvironment: RunTask took more than " + << TestTimeouts::action_max_timeout().InSeconds() + << " seconds. " + << "Posted from " << task.posted_from.ToString(); + } + } + + { + AutoLock auto_lock(lock_); + + CHECK_GT(num_tasks_running_, 0); + CHECK(can_run_tasks_); + + --num_tasks_running_; + + task_completed_cv_.Broadcast(); + } +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/task_environment.h b/chromium/base/test/task_environment.h new file mode 100644 index 00000000000..846f10e34ae --- /dev/null +++ b/chromium/base/test/task_environment.h @@ -0,0 +1,445 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TASK_ENVIRONMENT_H_ +#define BASE_TEST_TASK_ENVIRONMENT_H_ + +#include <memory> + +#include "base/compiler_specific.h" +#include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/observer_list.h" +#include "base/single_thread_task_runner.h" +#include "base/task/lazy_thread_pool_task_runner.h" +#include "base/task/sequence_manager/sequence_manager.h" +#include "base/test/scoped_run_loop_timeout.h" +#include "base/threading/thread_checker.h" +#include "base/time/time.h" +#include "base/traits_bag.h" +#include "build/build_config.h" + +namespace base { + +class Clock; +class FileDescriptorWatcher; +class SimpleTaskExecutor; +class TickClock; + +namespace subtle { +class ScopedTimeClockOverrides; +} + +namespace test { + +// This header exposes SingleThreadTaskEnvironment and TaskEnvironment. 
+// +// SingleThreadTaskEnvironment enables the following APIs within its scope: +// - (Thread|Sequenced)TaskRunnerHandle on the main thread +// - RunLoop on the main thread +// +// TaskEnvironment additionally enables: +// - posting to base::ThreadPool through base/task/thread_pool.h. +// +// Hint: For content::BrowserThreads, use content::BrowserTaskEnvironment. +// +// Tests should prefer SingleThreadTaskEnvironment over TaskEnvironment when the +// former is sufficient. +// +// Tasks posted to the (Thread|Sequenced)TaskRunnerHandle run synchronously when +// RunLoop::Run(UntilIdle) or TaskEnvironment::RunUntilIdle is called on the +// main thread. +// +// The TaskEnvironment requires TestTimeouts::Initialize() to be called in order +// to run posted tasks, so that it can watch for problematic long-running tasks. +// +// The TimeSource trait can be used to request that delayed tasks be under the +// manual control of RunLoop::Run() and TaskEnvironment::FastForward*() methods. +// +// If a TaskEnvironment's ThreadPoolExecutionMode is QUEUED, ThreadPool tasks +// run when RunUntilIdle() or ~TaskEnvironment is called. If +// ThreadPoolExecutionMode is ASYNC, they run as they are posted. +// +// All TaskEnvironment methods must be called from the main thread. +// +// Usage: +// +// class MyTestFixture : public testing::Test { +// public: +// (...) +// +// // protected rather than private visibility will allow controlling the +// // task environment (e.g. RunUntilIdle(), FastForwardBy(), etc.). from the +// // test body. +// protected: +// // Must generally be the first member to be initialized first and +// // destroyed last (some members that require single-threaded +// // initialization and tear down may need to come before -- e.g. +// // base::test::ScopedFeatureList). Extra traits, like TimeSource, are +// // best provided inline when declaring the TaskEnvironment, as +// // such: +// base::test::TaskEnvironment task_environment_{ +// base::test::TaskEnvironment::TimeSource::MOCK_TIME}; +// +// // Other members go here (or further below in private section.) +// }; +class TaskEnvironment { + protected: + // This enables a two-phase initialization for sub classes such as + // content::BrowserTaskEnvironment which need to provide the default task + // queue because they instantiate a scheduler on the same thread. Subclasses + // using this trait must invoke DeferredInitFromSubclass() before running the + // task environment. + struct SubclassCreatesDefaultTaskRunner {}; + + public: + enum class TimeSource { + // Delayed tasks and Time/TimeTicks::Now() use the real-time system clock. + SYSTEM_TIME, + + // Delayed tasks use a mock clock which only advances when reaching "idle" + // during a RunLoop::Run() call on the main thread or a FastForward*() call + // to this TaskEnvironment. "idle" is defined as the main thread and thread + // pool being out of ready tasks. In that situation : time advances to the + // soonest delay between main thread and thread pool delayed tasks, + // according to the semantics of the current Run*() or FastForward*() call. + // + // This also mocks Time/TimeTicks::Now() with the same mock clock. + // + // Warning some platform APIs are still real-time, e.g.: + // * PlatformThread::Sleep + // * WaitableEvent::TimedWait + // * ConditionVariable::TimedWait + // * Delayed tasks on unmanaged base::Thread's and other custom task + // runners. 
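    // [Editorial sketch, not part of this patch] A minimal example of a test
    // driven by mock time; the test and fixture names are illustrative only:
    //
    //   TEST(MyTimerTest, FiresAfterDelay) {
    //     base::test::TaskEnvironment task_environment{
    //         base::test::TaskEnvironment::TimeSource::MOCK_TIME};
    //
    //     bool fired = false;
    //     base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
    //         FROM_HERE, base::BindLambdaForTesting([&]() { fired = true; }),
    //         base::TimeDelta::FromMinutes(5));
    //
    //     // Runs the delayed task without a real five-minute wait.
    //     task_environment.FastForwardBy(base::TimeDelta::FromMinutes(5));
    //     EXPECT_TRUE(fired);
    //   }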
+ MOCK_TIME, + + DEFAULT = SYSTEM_TIME + }; + + // This type will determine what types of messages will get pumped by the main + // thread. + // Note: If your test needs to use a custom MessagePump you should + // consider using a SingleThreadTaskExecutor instead. + enum class MainThreadType { + // The main thread doesn't pump system messages. + DEFAULT, + // The main thread pumps UI messages. + UI, + // The main thread pumps asynchronous IO messages and supports the + // FileDescriptorWatcher API on POSIX. + IO, + }; + + // Note that this is irrelevant (and ignored) under + // ThreadingMode::MAIN_THREAD_ONLY + enum class ThreadPoolExecutionMode { + // Thread pool tasks are queued and only executed when RunUntilIdle(), + // FastForwardBy(), or FastForwardUntilNoTasksRemain() are explicitly + // called. Note: RunLoop::Run() does *not* unblock the ThreadPool in this + // mode (it strictly runs only the main thread). + QUEUED, + // Thread pool tasks run as they are posted. RunUntilIdle() can still be + // used to block until done. + // Note that regardless of this trait, delayed tasks are always "queued" + // under TimeSource::MOCK_TIME mode. + ASYNC, + DEFAULT = ASYNC + }; + + enum class ThreadingMode { + // ThreadPool will be initialized, thus adding support for multi-threaded + // tests. + MULTIPLE_THREADS, + // No thread pool will be initialized. Useful for tests that want to run + // single threaded. Prefer using SingleThreadTaskEnvironment over this + // trait. + MAIN_THREAD_ONLY, + DEFAULT = MULTIPLE_THREADS + }; + + // On Windows, sets the COM environment for the ThreadPoolInstance. Ignored + // on other platforms. + enum class ThreadPoolCOMEnvironment { + // Do not initialize COM for the pool's workers. + NONE, + + // Place the pool's workers in a COM MTA. + COM_MTA, + + // Enable the MTA by default in unit tests to match the browser process's + // ThreadPoolInstance configuration. + // + // This has the adverse side-effect of enabling the MTA in non-browser unit + // tests as well but the downside there is not as bad as not having it in + // browser unit tests. It just means some COM asserts may pass in unit + // tests where they wouldn't in integration tests or prod. That's okay + // because unit tests are already generally very loose on allowing I/O, + // waits, etc. Such misuse will still be caught in later phases (and COM + // usage should already be pretty much inexistent in sandboxed processes). + DEFAULT = COM_MTA, + }; + + // List of traits that are valid inputs for the constructor below. + struct ValidTraits { + ValidTraits(TimeSource); + ValidTraits(MainThreadType); + ValidTraits(ThreadPoolExecutionMode); + ValidTraits(SubclassCreatesDefaultTaskRunner); + ValidTraits(ThreadingMode); + ValidTraits(ThreadPoolCOMEnvironment); + }; + + // Constructor accepts zero or more traits which customize the testing + // environment. + template <typename... TaskEnvironmentTraits, + class CheckArgumentsAreValid = std::enable_if_t< + trait_helpers::AreValidTraits<ValidTraits, + TaskEnvironmentTraits...>::value>> + NOINLINE explicit TaskEnvironment(TaskEnvironmentTraits... 
traits) + : TaskEnvironment( + trait_helpers::GetEnum<TimeSource, TimeSource::DEFAULT>(traits...), + trait_helpers::GetEnum<MainThreadType, MainThreadType::DEFAULT>( + traits...), + trait_helpers::GetEnum<ThreadPoolExecutionMode, + ThreadPoolExecutionMode::DEFAULT>(traits...), + trait_helpers::GetEnum<ThreadingMode, ThreadingMode::DEFAULT>( + traits...), + trait_helpers::GetEnum<ThreadPoolCOMEnvironment, + ThreadPoolCOMEnvironment::DEFAULT>( + traits...), + trait_helpers::HasTrait<SubclassCreatesDefaultTaskRunner, + TaskEnvironmentTraits...>(), + trait_helpers::NotATraitTag()) {} + + // Waits until no undelayed ThreadPool tasks remain. Then, unregisters the + // ThreadPoolInstance and the (Thread|Sequenced)TaskRunnerHandle. + virtual ~TaskEnvironment(); + + // Returns a TaskRunner that schedules tasks on the main thread. + scoped_refptr<base::SingleThreadTaskRunner> GetMainThreadTaskRunner(); + + // Returns whether the main thread's TaskRunner has pending tasks. This will + // always return true if called right after RunUntilIdle. + bool MainThreadIsIdle() const; + + // Runs tasks until both the (Thread|Sequenced)TaskRunnerHandle and the + // ThreadPool's non-delayed queues are empty. + // While RunUntilIdle() is quite practical and sometimes even necessary -- for + // example, to flush all tasks bound to Unretained() state before destroying + // test members -- it should be used with caution per the following warnings: + // + // WARNING #1: This may run long (flakily timeout) and even never return! Do + // not use this when repeating tasks such as animated web pages + // are present. + // WARNING #2: This may return too early! For example, if used to run until an + // incoming event has occurred but that event depends on a task in + // a different queue -- e.g. a standalone base::Thread or a system + // event. + // + // As such, prefer RunLoop::Run() with an explicit RunLoop::QuitClosure() when + // possible. + void RunUntilIdle(); + + // Only valid for instances using TimeSource::MOCK_TIME. Fast-forwards + // virtual time by |delta|, causing all tasks on the main thread and thread + // pool with a remaining delay less than or equal to |delta| to be executed in + // their natural order before this returns. |delta| must be non-negative. Upon + // returning from this method, NowTicks() will be >= the initial |NowTicks() + + // delta|. It is guaranteed to be == iff tasks executed in this + // FastForwardBy() didn't result in nested calls to time-advancing-methods. + void FastForwardBy(TimeDelta delta); + + // Only valid for instances using TimeSource::MOCK_TIME. + // Short for FastForwardBy(TimeDelta::Max()). + // + // WARNING: This has the same caveat as RunUntilIdle() and is even more likely + // to spin forever (any RepeatingTimer will cause this). + void FastForwardUntilNoTasksRemain(); + + // Only valid for instances using TimeSource::MOCK_TIME. Advances virtual time + // by |delta|. Unlike FastForwardBy, this does not run tasks. Prefer + // FastForwardBy() when possible but this can be useful when testing blocked + // pending tasks where being idle (required to fast-forward) is not possible. + // + // Delayed tasks that are ripe as a result of this will be scheduled. + // RunUntilIdle() can be used after this call to ensure those tasks have run. 
+ // Note: AdvanceClock(delta) + RunUntilIdle() is slightly different from + // FastForwardBy(delta) in that time passes instantly before running any task + // (whereas FastForwardBy() will advance the clock in the smallest increments + // possible at a time). Hence FastForwardBy() is more realistic but + // AdvanceClock() can be useful when testing edge case scenarios that + // specifically handle more time than expected to have passed. + void AdvanceClock(TimeDelta delta); + + // Only valid for instances using TimeSource::MOCK_TIME. Returns a + // TickClock whose time is updated by FastForward(By|UntilNoTasksRemain). + const TickClock* GetMockTickClock() const; + std::unique_ptr<TickClock> DeprecatedGetMockTickClock(); + + // Only valid for instances using TimeSource::MOCK_TIME. Returns a + // Clock whose time is updated by FastForward(By|UntilNoTasksRemain). The + // initial value is implementation defined and should be queried by tests that + // depend on it. + // TickClock should be used instead of Clock to measure elapsed time in a + // process. See time.h. + const Clock* GetMockClock() const; + + // Only valid for instances using TimeSource::MOCK_TIME. Returns the current + // virtual tick time (based on a realistic Now(), sampled when this + // TaskEnvironment was created, and manually advanced from that point on). + // This is always equivalent to base::TimeTicks::Now() under + // TimeSource::MOCK_TIME. + base::TimeTicks NowTicks() const; + + // Only valid for instances using TimeSource::MOCK_TIME. Returns the + // number of pending tasks (delayed and non-delayed) of the main thread's + // TaskRunner. When debugging, you can use DescribePendingMainThreadTasks() to + // see what those are. + size_t GetPendingMainThreadTaskCount() const; + + // Only valid for instances using TimeSource::MOCK_TIME. + // Returns the delay until the next pending task of the main thread's + // TaskRunner if there is one, otherwise it returns TimeDelta::Max(). + TimeDelta NextMainThreadPendingTaskDelay() const; + + // Only valid for instances using TimeSource::MOCK_TIME. + // Returns true iff the next task is delayed. Returns false if the next task + // is immediate or if there is no next task. + bool NextTaskIsDelayed() const; + + // For debugging purposes: Dumps information about pending tasks on the main + // thread. + void DescribePendingMainThreadTasks() const; + + class DestructionObserver : public CheckedObserver { + public: + DestructionObserver() = default; + ~DestructionObserver() override = default; + + DestructionObserver(const DestructionObserver&) = delete; + DestructionObserver& operator=(const DestructionObserver&) = delete; + + virtual void WillDestroyCurrentTaskEnvironment() = 0; + }; + + // Adds/removes a DestructionObserver to any TaskEnvironment. Observers are + // notified when any TaskEnvironment goes out of scope (other than with a move + // operation). Must be called on the main thread. + static void AddDestructionObserver(DestructionObserver* observer); + static void RemoveDestructionObserver(DestructionObserver* observer); + + // The number of foreground workers in the ThreadPool managed by a + // TaskEnvironment instance. This can be used to determine the maximum + // parallelism in tests that require each parallel task it spawns to be + // running at once. Having multiple threads prevents deadlocks should some + // blocking APIs not use ScopedBlockingCall. It also allows enough concurrency + // to allow TSAN to spot data races. 
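  // [Editorial sketch, not part of this patch] Contrast of FastForwardBy()
  // and AdvanceClock() per the comments above; DoWork is an illustrative
  // placeholder for any delayed work:
  //
  //   base::test::TaskEnvironment env{
  //       base::test::TaskEnvironment::TimeSource::MOCK_TIME};
  //   base::ThreadPool::PostDelayedTask(FROM_HERE, base::BindOnce(&DoWork),
  //                                     base::TimeDelta::FromHours(1));
  //
  //   env.FastForwardBy(base::TimeDelta::FromHours(1));  // Runs DoWork.
  //
  //   // versus:
  //   env.AdvanceClock(base::TimeDelta::FromHours(1));   // DoWork is now
  //   env.RunUntilIdle();                                 // ripe; runs here.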
+ static constexpr int kNumForegroundThreadPoolThreads = 4; + + protected: + explicit TaskEnvironment(TaskEnvironment&& other); + + constexpr MainThreadType main_thread_type() const { + return main_thread_type_; + } + + constexpr ThreadPoolExecutionMode thread_pool_execution_mode() const { + return thread_pool_execution_mode_; + } + + // Returns the TimeDomain driving this TaskEnvironment. + sequence_manager::TimeDomain* GetTimeDomain() const; + + sequence_manager::SequenceManager* sequence_manager() const; + + void DeferredInitFromSubclass( + scoped_refptr<base::SingleThreadTaskRunner> task_runner); + + // Derived classes may need to control when the sequence manager goes away. + void NotifyDestructionObserversAndReleaseSequenceManager(); + + private: + class TestTaskTracker; + class MockTimeDomain; + + void InitializeThreadPool(); + void DestroyThreadPool(); + + void CompleteInitialization(); + + // The template constructor has to be in the header but it delegates to this + // constructor to initialize all other members out-of-line. + TaskEnvironment(TimeSource time_source, + MainThreadType main_thread_type, + ThreadPoolExecutionMode thread_pool_execution_mode, + ThreadingMode threading_mode, + ThreadPoolCOMEnvironment thread_pool_com_environment, + bool subclass_creates_default_taskrunner, + trait_helpers::NotATraitTag tag); + + const MainThreadType main_thread_type_; + const ThreadPoolExecutionMode thread_pool_execution_mode_; + const ThreadingMode threading_mode_; + const ThreadPoolCOMEnvironment thread_pool_com_environment_; + const bool subclass_creates_default_taskrunner_; + + std::unique_ptr<sequence_manager::SequenceManager> sequence_manager_; + + // Manages the clock under TimeSource::MOCK_TIME modes. Null in + // TimeSource::SYSTEM_TIME mode. + std::unique_ptr<MockTimeDomain> mock_time_domain_; + + // Overrides Time/TimeTicks::Now() under TimeSource::MOCK_TIME_AND_NOW mode. + // Null in other modes. + std::unique_ptr<subtle::ScopedTimeClockOverrides> time_overrides_; + + scoped_refptr<sequence_manager::TaskQueue> task_queue_; + scoped_refptr<base::SingleThreadTaskRunner> task_runner_; + + // Only set for instances using TimeSource::MOCK_TIME. + std::unique_ptr<Clock> mock_clock_; + +#if defined(OS_POSIX) || defined(OS_FUCHSIA) + // Enables the FileDescriptorWatcher API iff running a MainThreadType::IO. + std::unique_ptr<FileDescriptorWatcher> file_descriptor_watcher_; +#endif + + // Owned by the ThreadPoolInstance. + TestTaskTracker* task_tracker_ = nullptr; + + // Ensures destruction of lazy TaskRunners when this is destroyed. + std::unique_ptr<internal::ScopedLazyTaskRunnerListForTesting> + scoped_lazy_task_runner_list_for_testing_; + + // Sets RunLoop::Run() to LOG(FATAL) if not Quit() in a timely manner. + std::unique_ptr<ScopedRunLoopTimeout> run_loop_timeout_; + + std::unique_ptr<bool> owns_instance_ = std::make_unique<bool>(true); + + // To support base::CurrentThread(). + std::unique_ptr<SimpleTaskExecutor> simple_task_executor_; + + // Used to verify thread-affinity of operations that must occur on the main + // thread. This is the case for anything that modifies or drives the + // |sequence_manager_|. + THREAD_CHECKER(main_thread_checker_); + + DISALLOW_COPY_AND_ASSIGN(TaskEnvironment); +}; + +// SingleThreadTaskEnvironment takes the same traits as TaskEnvironment and is +// used the exact same way. 
It's a short-form for +// TaskEnvironment{TaskEnvironment::ThreadingMode::MAIN_THREAD_ONLY, ...}; +class SingleThreadTaskEnvironment : public TaskEnvironment { + public: + template <class... ArgTypes> + SingleThreadTaskEnvironment(ArgTypes... args) + : TaskEnvironment(ThreadingMode::MAIN_THREAD_ONLY, args...) {} +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_TASK_ENVIRONMENT_H_ diff --git a/chromium/base/test/task_environment_unittest.cc b/chromium/base/test/task_environment_unittest.cc new file mode 100644 index 00000000000..5d20d60f2c1 --- /dev/null +++ b/chromium/base/test/task_environment_unittest.cc @@ -0,0 +1,1274 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/task_environment.h" + +#include <atomic> +#include <memory> + +#include "base/atomicops.h" +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/cancelable_callback.h" +#include "base/debug/debugger.h" +#include "base/message_loop/message_loop_current.h" +#include "base/run_loop.h" +#include "base/synchronization/atomic_flag.h" +#include "base/synchronization/waitable_event.h" +#include "base/task/sequence_manager/time_domain.h" +#include "base/task/thread_pool.h" +#include "base/task/thread_pool/thread_pool_instance.h" +#include "base/test/bind_test_util.h" +#include "base/test/gtest_util.h" +#include "base/test/mock_callback.h" +#include "base/test/mock_log.h" +#include "base/test/scoped_run_loop_timeout.h" +#include "base/test/test_timeouts.h" +#include "base/threading/platform_thread.h" +#include "base/threading/sequence_local_storage_slot.h" +#include "base/threading/sequenced_task_runner_handle.h" +#include "base/threading/thread.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/time/clock.h" +#include "base/time/default_clock.h" +#include "base/time/tick_clock.h" +#include "base/win/com_init_util.h" +#include "build/build_config.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest-spi.h" +#include "testing/gtest/include/gtest/gtest.h" + +#if defined(OS_POSIX) +#include <unistd.h> + +#include "base/files/file_descriptor_watcher_posix.h" +#endif // defined(OS_POSIX) + +#if defined(OS_WIN) +#include "base/win/scoped_com_initializer.h" +#endif + +namespace base { +namespace test { + +namespace { + +using ::testing::_; +using ::testing::HasSubstr; +using ::testing::IsNull; +using ::testing::Not; +using ::testing::Return; + +class TaskEnvironmentTest : public testing::Test {}; + +void VerifyRunUntilIdleDidNotReturnAndSetFlag( + AtomicFlag* run_until_idle_returned, + AtomicFlag* task_ran) { + EXPECT_FALSE(run_until_idle_returned->IsSet()); + task_ran->Set(); +} + +void RunUntilIdleTest( + TaskEnvironment::ThreadPoolExecutionMode thread_pool_execution_mode) { + AtomicFlag run_until_idle_returned; + TaskEnvironment task_environment(thread_pool_execution_mode); + + AtomicFlag first_main_thread_task_ran; + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag, + Unretained(&run_until_idle_returned), + Unretained(&first_main_thread_task_ran))); + + AtomicFlag first_thread_pool_task_ran; + ThreadPool::PostTask(FROM_HERE, + BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag, + Unretained(&run_until_idle_returned), + Unretained(&first_thread_pool_task_ran))); + + AtomicFlag second_thread_pool_task_ran; + AtomicFlag 
second_main_thread_task_ran; + ThreadPool::PostTaskAndReply( + FROM_HERE, + BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag, + Unretained(&run_until_idle_returned), + Unretained(&second_thread_pool_task_ran)), + BindOnce(&VerifyRunUntilIdleDidNotReturnAndSetFlag, + Unretained(&run_until_idle_returned), + Unretained(&second_main_thread_task_ran))); + + task_environment.RunUntilIdle(); + run_until_idle_returned.Set(); + + EXPECT_TRUE(first_main_thread_task_ran.IsSet()); + EXPECT_TRUE(first_thread_pool_task_ran.IsSet()); + EXPECT_TRUE(second_thread_pool_task_ran.IsSet()); + EXPECT_TRUE(second_main_thread_task_ran.IsSet()); +} + +} // namespace + +TEST_F(TaskEnvironmentTest, QueuedRunUntilIdle) { + RunUntilIdleTest(TaskEnvironment::ThreadPoolExecutionMode::QUEUED); +} + +TEST_F(TaskEnvironmentTest, AsyncRunUntilIdle) { + RunUntilIdleTest(TaskEnvironment::ThreadPoolExecutionMode::ASYNC); +} + +// Verify that tasks posted to an ThreadPoolExecutionMode::QUEUED +// TaskEnvironment do not run outside of RunUntilIdle(). +TEST_F(TaskEnvironmentTest, QueuedTasksDoNotRunOutsideOfRunUntilIdle) { + TaskEnvironment task_environment( + TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + AtomicFlag run_until_idle_called; + ThreadPool::PostTask(FROM_HERE, + BindOnce( + [](AtomicFlag* run_until_idle_called) { + EXPECT_TRUE(run_until_idle_called->IsSet()); + }, + Unretained(&run_until_idle_called))); + PlatformThread::Sleep(TestTimeouts::tiny_timeout()); + run_until_idle_called.Set(); + task_environment.RunUntilIdle(); + + AtomicFlag other_run_until_idle_called; + ThreadPool::PostTask(FROM_HERE, + BindOnce( + [](AtomicFlag* other_run_until_idle_called) { + EXPECT_TRUE(other_run_until_idle_called->IsSet()); + }, + Unretained(&other_run_until_idle_called))); + PlatformThread::Sleep(TestTimeouts::tiny_timeout()); + other_run_until_idle_called.Set(); + task_environment.RunUntilIdle(); +} + +// Verify that a task posted to an ThreadPoolExecutionMode::ASYNC +// TaskEnvironment can run without a call to RunUntilIdle(). +TEST_F(TaskEnvironmentTest, AsyncTasksRunAsTheyArePosted) { + TaskEnvironment task_environment( + TaskEnvironment::ThreadPoolExecutionMode::ASYNC); + + WaitableEvent task_ran; + ThreadPool::PostTask(FROM_HERE, + BindOnce(&WaitableEvent::Signal, Unretained(&task_ran))); + task_ran.Wait(); +} + +// Verify that a task posted to an ThreadPoolExecutionMode::ASYNC +// TaskEnvironment after a call to RunUntilIdle() can run without another +// call to RunUntilIdle(). +TEST_F(TaskEnvironmentTest, AsyncTasksRunAsTheyArePostedAfterRunUntilIdle) { + TaskEnvironment task_environment( + TaskEnvironment::ThreadPoolExecutionMode::ASYNC); + + task_environment.RunUntilIdle(); + + WaitableEvent task_ran; + ThreadPool::PostTask(FROM_HERE, + BindOnce(&WaitableEvent::Signal, Unretained(&task_ran))); + task_ran.Wait(); +} + +void DelayedTasksTest(TaskEnvironment::TimeSource time_source) { + // Use a QUEUED execution-mode environment, so that no tasks are actually + // executed until RunUntilIdle()/FastForwardBy() are invoked. + TaskEnvironment task_environment( + time_source, TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + subtle::Atomic32 counter = 0; + + constexpr base::TimeDelta kShortTaskDelay = TimeDelta::FromDays(1); + // Should run only in MOCK_TIME environment when time is fast-forwarded. 
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, 4); + }, + Unretained(&counter)), + kShortTaskDelay); + ThreadPool::PostDelayedTask(FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, + 128); + }, + Unretained(&counter)), + kShortTaskDelay); + + constexpr base::TimeDelta kLongTaskDelay = TimeDelta::FromDays(7); + // Same as first task, longer delays to exercise + // FastForwardUntilNoTasksRemain(). + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, 8); + }, + Unretained(&counter)), + TimeDelta::FromDays(5)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, 16); + }, + Unretained(&counter)), + kLongTaskDelay); + ThreadPool::PostDelayedTask(FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, + 256); + }, + Unretained(&counter)), + kLongTaskDelay * 2); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, 512); + }, + Unretained(&counter)), + kLongTaskDelay * 3); + ThreadPool::PostDelayedTask(FROM_HERE, + BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, + 1024); + }, + Unretained(&counter)), + kLongTaskDelay * 4); + + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement(counter, 1); + }, + Unretained(&counter))); + ThreadPool::PostTask(FROM_HERE, BindOnce( + [](subtle::Atomic32* counter) { + subtle::NoBarrier_AtomicIncrement( + counter, 2); + }, + Unretained(&counter))); + + // This expectation will fail flakily if the preceding PostTask() is executed + // asynchronously, indicating a problem with the QUEUED execution mode. + int expected_value = 0; + EXPECT_EQ(expected_value, counter); + + // RunUntilIdle() should process non-delayed tasks only in all queues. + task_environment.RunUntilIdle(); + expected_value += 1; + expected_value += 2; + EXPECT_EQ(expected_value, counter); + + if (time_source == TaskEnvironment::TimeSource::MOCK_TIME) { + const TimeTicks start_time = task_environment.NowTicks(); + + // Delay inferior to the delay of the first posted task. 
+ constexpr base::TimeDelta kInferiorTaskDelay = TimeDelta::FromSeconds(1); + static_assert(kInferiorTaskDelay < kShortTaskDelay, + "|kInferiorTaskDelay| should be " + "set to a value inferior to the first posted task's delay."); + task_environment.FastForwardBy(kInferiorTaskDelay); + EXPECT_EQ(expected_value, counter); + + task_environment.FastForwardBy(kShortTaskDelay - kInferiorTaskDelay); + expected_value += 4; + expected_value += 128; + EXPECT_EQ(expected_value, counter); + + task_environment.FastForwardUntilNoTasksRemain(); + expected_value += 8; + expected_value += 16; + expected_value += 256; + expected_value += 512; + expected_value += 1024; + EXPECT_EQ(expected_value, counter); + + EXPECT_EQ(task_environment.NowTicks() - start_time, kLongTaskDelay * 4); + } +} + +TEST_F(TaskEnvironmentTest, DelayedTasksUnderSystemTime) { + DelayedTasksTest(TaskEnvironment::TimeSource::SYSTEM_TIME); +} + +TEST_F(TaskEnvironmentTest, DelayedTasksUnderMockTime) { + DelayedTasksTest(TaskEnvironment::TimeSource::MOCK_TIME); +} + +// Regression test for https://crbug.com/824770. +void SupportsSequenceLocalStorageOnMainThreadTest( + TaskEnvironment::TimeSource time_source) { + TaskEnvironment task_environment( + time_source, TaskEnvironment::ThreadPoolExecutionMode::ASYNC); + + SequenceLocalStorageSlot<int> sls_slot; + sls_slot.emplace(5); + EXPECT_EQ(5, *sls_slot); +} + +TEST_F(TaskEnvironmentTest, SupportsSequenceLocalStorageOnMainThread) { + SupportsSequenceLocalStorageOnMainThreadTest( + TaskEnvironment::TimeSource::SYSTEM_TIME); +} + +TEST_F(TaskEnvironmentTest, + SupportsSequenceLocalStorageOnMainThreadWithMockTime) { + SupportsSequenceLocalStorageOnMainThreadTest( + TaskEnvironment::TimeSource::MOCK_TIME); +} + +// Verify that the right MessagePump is instantiated under each MainThreadType. +// This avoids having to run all other TaskEnvironmentTests in every +// MainThreadType which is redundant (message loop and message pump tests +// otherwise cover the advanced functionality provided by UI/IO pumps). +TEST_F(TaskEnvironmentTest, MainThreadType) { + // Uses MessageLoopCurrent as a convenience accessor but could be replaced by + // different accessors when we get rid of MessageLoopCurrent. + EXPECT_FALSE(MessageLoopCurrent::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForUI::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForIO::IsSet()); + { + TaskEnvironment task_environment; + EXPECT_TRUE(MessageLoopCurrent::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForUI::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForIO::IsSet()); + } + { + TaskEnvironment task_environment(TaskEnvironment::MainThreadType::UI); + EXPECT_TRUE(MessageLoopCurrent::IsSet()); + EXPECT_TRUE(MessageLoopCurrentForUI::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForIO::IsSet()); + } + { + TaskEnvironment task_environment(TaskEnvironment::MainThreadType::IO); + EXPECT_TRUE(MessageLoopCurrent::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForUI::IsSet()); + EXPECT_TRUE(MessageLoopCurrentForIO::IsSet()); + } + EXPECT_FALSE(MessageLoopCurrent::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForUI::IsSet()); + EXPECT_FALSE(MessageLoopCurrentForIO::IsSet()); +} + +#if defined(OS_POSIX) +TEST_F(TaskEnvironmentTest, SupportsFileDescriptorWatcherOnIOMainThread) { + TaskEnvironment task_environment(TaskEnvironment::MainThreadType::IO); + + int pipe_fds_[2]; + ASSERT_EQ(0, pipe(pipe_fds_)); + + RunLoop run_loop; + + // The write end of a newly created pipe is immediately writable. 
+ auto controller = FileDescriptorWatcher::WatchWritable( + pipe_fds_[1], run_loop.QuitClosure()); + + // This will hang if the notification doesn't occur as expected. + run_loop.Run(); +} + +TEST_F(TaskEnvironmentTest, + SupportsFileDescriptorWatcherOnIOMockTimeMainThread) { + TaskEnvironment task_environment(TaskEnvironment::MainThreadType::IO, + TaskEnvironment::TimeSource::MOCK_TIME); + + int pipe_fds_[2]; + ASSERT_EQ(0, pipe(pipe_fds_)); + + RunLoop run_loop; + + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, BindLambdaForTesting([&]() { + int64_t x = 1; + auto ret = write(pipe_fds_[1], &x, sizeof(x)); + ASSERT_EQ(static_cast<size_t>(ret), sizeof(x)); + }), + TimeDelta::FromHours(1)); + + auto controller = FileDescriptorWatcher::WatchReadable( + pipe_fds_[0], run_loop.QuitClosure()); + + // This will hang if the notification doesn't occur as expected (Run() should + // fast-forward-time when idle). + run_loop.Run(); +} +#endif // defined(OS_POSIX) + +// Verify that the TickClock returned by +// |TaskEnvironment::GetMockTickClock| gets updated when the +// FastForward(By|UntilNoTasksRemain) functions are called. +TEST_F(TaskEnvironmentTest, FastForwardAdvancesTickClock) { + // Use a QUEUED execution-mode environment, so that no tasks are actually + // executed until RunUntilIdle()/FastForwardBy() are invoked. + TaskEnvironment task_environment( + TaskEnvironment::TimeSource::MOCK_TIME, + TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + constexpr base::TimeDelta kShortTaskDelay = TimeDelta::FromDays(1); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, base::DoNothing(), + kShortTaskDelay); + + constexpr base::TimeDelta kLongTaskDelay = TimeDelta::FromDays(7); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, base::DoNothing(), + kLongTaskDelay); + + const base::TickClock* tick_clock = task_environment.GetMockTickClock(); + base::TimeTicks tick_clock_ref = tick_clock->NowTicks(); + + // Make sure that |FastForwardBy| advances the clock. + task_environment.FastForwardBy(kShortTaskDelay); + EXPECT_EQ(kShortTaskDelay, tick_clock->NowTicks() - tick_clock_ref); + + // Make sure that |FastForwardUntilNoTasksRemain| advances the clock. + task_environment.FastForwardUntilNoTasksRemain(); + EXPECT_EQ(kLongTaskDelay, tick_clock->NowTicks() - tick_clock_ref); + + // Fast-forwarding to a time at which there's no tasks should also advance the + // clock. 
+ task_environment.FastForwardBy(kLongTaskDelay); + EXPECT_EQ(kLongTaskDelay * 2, tick_clock->NowTicks() - tick_clock_ref); +} + +TEST_F(TaskEnvironmentTest, FastForwardAdvancesMockClock) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const Clock* clock = task_environment.GetMockClock(); + const Time start_time = clock->Now(); + task_environment.FastForwardBy(kDelay); + + EXPECT_EQ(start_time + kDelay, clock->Now()); +} + +TEST_F(TaskEnvironmentTest, FastForwardAdvancesTime) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const Time start_time = base::Time::Now(); + task_environment.FastForwardBy(kDelay); + EXPECT_EQ(start_time + kDelay, base::Time::Now()); +} + +TEST_F(TaskEnvironmentTest, FastForwardAdvancesTimeTicks) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const TimeTicks start_time = base::TimeTicks::Now(); + task_environment.FastForwardBy(kDelay); + EXPECT_EQ(start_time + kDelay, base::TimeTicks::Now()); +} + +TEST_F(TaskEnvironmentTest, AdvanceClockAdvancesTickClock) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const base::TickClock* tick_clock = task_environment.GetMockTickClock(); + const base::TimeTicks start_time = tick_clock->NowTicks(); + task_environment.AdvanceClock(kDelay); + + EXPECT_EQ(start_time + kDelay, tick_clock->NowTicks()); +} + +TEST_F(TaskEnvironmentTest, AdvanceClockAdvancesMockClock) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const Clock* clock = task_environment.GetMockClock(); + const Time start_time = clock->Now(); + task_environment.AdvanceClock(kDelay); + + EXPECT_EQ(start_time + kDelay, clock->Now()); +} + +TEST_F(TaskEnvironmentTest, AdvanceClockAdvancesTime) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const Time start_time = base::Time::Now(); + task_environment.AdvanceClock(kDelay); + EXPECT_EQ(start_time + kDelay, base::Time::Now()); +} + +TEST_F(TaskEnvironmentTest, AdvanceClockAdvancesTimeTicks) { + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const TimeTicks start_time = base::TimeTicks::Now(); + task_environment.AdvanceClock(kDelay); + EXPECT_EQ(start_time + kDelay, base::TimeTicks::Now()); +} + +TEST_F(TaskEnvironmentTest, AdvanceClockDoesNotRunTasks) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + constexpr base::TimeDelta kTaskDelay = TimeDelta::FromDays(1); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, base::DoNothing(), + kTaskDelay); + + EXPECT_EQ(1U, task_environment.GetPendingMainThreadTaskCount()); + EXPECT_TRUE(task_environment.NextTaskIsDelayed()); + + task_environment.AdvanceClock(kTaskDelay); + + // The task is still pending, but is now runnable. 
+ EXPECT_EQ(1U, task_environment.GetPendingMainThreadTaskCount()); + EXPECT_FALSE(task_environment.NextTaskIsDelayed()); +} + +TEST_F(TaskEnvironmentTest, AdvanceClockSchedulesRipeDelayedTasks) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + bool ran = false; + + constexpr base::TimeDelta kTaskDelay = TimeDelta::FromDays(1); + ThreadPool::PostDelayedTask( + FROM_HERE, base::BindLambdaForTesting([&]() { ran = true; }), kTaskDelay); + + task_environment.AdvanceClock(kTaskDelay); + EXPECT_FALSE(ran); + task_environment.RunUntilIdle(); + EXPECT_TRUE(ran); +} + +// Verify that FastForwardBy() runs existing immediate tasks before advancing, +// then advances to the next delayed task, runs it, then advances the remainder +// of time when out of tasks. +TEST_F(TaskEnvironmentTest, FastForwardOnlyAdvancesWhenIdle) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const TimeTicks start_time = base::TimeTicks::Now(); + + constexpr base::TimeDelta kDelay = TimeDelta::FromSeconds(42); + constexpr base::TimeDelta kFastForwardUntil = TimeDelta::FromSeconds(100); + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, BindLambdaForTesting( + [&]() { EXPECT_EQ(start_time, base::TimeTicks::Now()); })); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, BindLambdaForTesting([&]() { + EXPECT_EQ(start_time + kDelay, base::TimeTicks::Now()); + }), + kDelay); + task_environment.FastForwardBy(kFastForwardUntil); + EXPECT_EQ(start_time + kFastForwardUntil, base::TimeTicks::Now()); +} + +// FastForwardBy(0) should be equivalent of RunUntilIdle(). +TEST_F(TaskEnvironmentTest, FastForwardZero) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + std::atomic_int run_count{0}; + + for (int i = 0; i < 1000; ++i) { + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, BindLambdaForTesting([&]() { + run_count.fetch_add(1, std::memory_order_relaxed); + })); + ThreadPool::PostTask(FROM_HERE, BindLambdaForTesting([&]() { + run_count.fetch_add(1, std::memory_order_relaxed); + })); + } + + task_environment.FastForwardBy(base::TimeDelta()); + + EXPECT_EQ(2000, run_count.load(std::memory_order_relaxed)); +} + +TEST_F(TaskEnvironmentTest, NestedFastForwardBy) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + constexpr TimeDelta kDelayPerTask = TimeDelta::FromMilliseconds(1); + const TimeTicks start_time = task_environment.NowTicks(); + + int max_nesting_level = 0; + + RepeatingClosure post_fast_forwarding_task; + post_fast_forwarding_task = BindLambdaForTesting([&]() { + if (max_nesting_level < 5) { + ++max_nesting_level; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, post_fast_forwarding_task, kDelayPerTask); + task_environment.FastForwardBy(kDelayPerTask); + } + }); + post_fast_forwarding_task.Run(); + + EXPECT_EQ(max_nesting_level, 5); + EXPECT_EQ(task_environment.NowTicks(), start_time + kDelayPerTask * 5); +} + +TEST_F(TaskEnvironmentTest, NestedRunInFastForwardBy) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + constexpr TimeDelta kDelayPerTask = TimeDelta::FromMilliseconds(1); + const TimeTicks start_time = task_environment.NowTicks(); + + std::vector<RunLoop*> run_loops; + + RepeatingClosure post_and_runloop_task; + post_and_runloop_task = BindLambdaForTesting([&]() { + // Run 4 nested run loops on top of the initial FastForwardBy(). 
+ if (run_loops.size() < 4U) {
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_and_runloop_task, kDelayPerTask);
+
+ RunLoop run_loop(RunLoop::Type::kNestableTasksAllowed);
+ run_loops.push_back(&run_loop);
+ run_loop.Run();
+ } else {
+ for (RunLoop* run_loop : run_loops) {
+ run_loop->Quit();
+ }
+ }
+ });
+
+ // Initial task is FastForwardBy().
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_and_runloop_task, kDelayPerTask);
+ task_environment.FastForwardBy(kDelayPerTask);
+
+ EXPECT_EQ(run_loops.size(), 4U);
+ EXPECT_EQ(task_environment.NowTicks(), start_time + kDelayPerTask * 5);
+}
+
+TEST_F(TaskEnvironmentTest,
+ CrossThreadImmediateTaskPostingDoesntAffectMockTime) {
+ TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME);
+
+ int count = 0;
+
+ // Post tasks delayed between 0 and 999 seconds.
+ for (int i = 0; i < 1000; ++i) {
+ const TimeDelta delay = TimeDelta::FromSeconds(i);
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(
+ [](TimeTicks expected_run_time, int* count) {
+ EXPECT_EQ(expected_run_time, TimeTicks::Now());
+ ++*count;
+ },
+ TimeTicks::Now() + delay, &count),
+ delay);
+ }
+
+ // Having a bunch of tasks running in parallel and replying to the main thread
+ // shouldn't affect the rest of this test. Wait for the first task to run
+ // before proceeding with the test to increase the likelihood of exercising
+ // races.
+ base::WaitableEvent first_reply_is_incoming;
+ for (int i = 0; i < 1000; ++i) {
+ ThreadPool::PostTaskAndReply(
+ FROM_HERE,
+ BindOnce(&WaitableEvent::Signal, Unretained(&first_reply_is_incoming)),
+ DoNothing());
+ }
+ first_reply_is_incoming.Wait();
+
+ task_environment.FastForwardBy(TimeDelta::FromSeconds(1000));
+
+ // If this test flakes, it's because there's an error with MockTimeDomain.
+ EXPECT_EQ(count, 1000);
+
+ // Flush any remaining asynchronous tasks with Unretained() state.
+ task_environment.RunUntilIdle();
+}
+
+TEST_F(TaskEnvironmentTest, MultiThreadedMockTime) {
+ TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME);
+
+ constexpr TimeDelta kOneMs = TimeDelta::FromMilliseconds(1);
+ const TimeTicks start_time = task_environment.NowTicks();
+ const TimeTicks end_time = start_time + TimeDelta::FromMilliseconds(1'000);
+
+ // Last TimeTicks::Now() seen from either context.
+ TimeTicks last_main_thread_ticks = start_time;
+ TimeTicks last_thread_pool_ticks = start_time;
+
+ RepeatingClosure post_main_thread_delayed_task;
+ post_main_thread_delayed_task = BindLambdaForTesting([&]() {
+ // Expect that time only moves forward.
+ EXPECT_GE(task_environment.NowTicks(), last_main_thread_ticks);
+
+ // Post four tasks to exercise the system some more but only if this is the
+ // first task at its runtime (otherwise we end up with 4^10'000 tasks by
+ // the end!).
+ if (last_main_thread_ticks < task_environment.NowTicks() &&
+ task_environment.NowTicks() < end_time) {
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_main_thread_delayed_task, kOneMs);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_main_thread_delayed_task, kOneMs);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_main_thread_delayed_task, kOneMs);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_main_thread_delayed_task, kOneMs);
+ }
+
+ last_main_thread_ticks = task_environment.NowTicks();
+ });
+
+ RepeatingClosure post_thread_pool_delayed_task;
+ post_thread_pool_delayed_task = BindLambdaForTesting([&]() {
+ // Expect that time only moves forward.
+ EXPECT_GE(task_environment.NowTicks(), last_thread_pool_ticks);
+
+ // Post four tasks to exercise the system some more but only if this is the
+ // first task at its runtime (otherwise we end up with 4^10'000 tasks by
+ // the end!).
+ if (last_thread_pool_ticks < task_environment.NowTicks() &&
+ task_environment.NowTicks() < end_time) {
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_thread_pool_delayed_task, kOneMs);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_thread_pool_delayed_task, kOneMs);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_thread_pool_delayed_task, kOneMs);
+ SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_thread_pool_delayed_task, kOneMs);
+
+ EXPECT_LT(task_environment.NowTicks(), end_time);
+ }
+
+ last_thread_pool_ticks = task_environment.NowTicks();
+ });
+
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, post_main_thread_delayed_task, kOneMs);
+ ThreadPool::CreateSequencedTaskRunner({})->PostDelayedTask(
+ FROM_HERE, post_thread_pool_delayed_task, kOneMs);
+
+ task_environment.FastForwardUntilNoTasksRemain();
+
+ EXPECT_EQ(last_main_thread_ticks, end_time);
+ EXPECT_EQ(last_thread_pool_ticks, end_time);
+ EXPECT_EQ(task_environment.NowTicks(), end_time);
+}
+
+// This test ensures the implementation of FastForwardBy() doesn't fast-forward
+// beyond the cap if it reaches idle with pending delayed tasks further ahead
+// on the main thread.
+TEST_F(TaskEnvironmentTest, MultiThreadedFastForwardBy) {
+ TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME);
+
+ const TimeTicks start_time = task_environment.NowTicks();
+
+ // The 1s delayed task in the pool should run but not the 5s delayed task on
+ // the main thread, and the fast-forward should be capped at +2s.
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE),
+ TimeDelta::FromSeconds(5));
+ ThreadPool::PostDelayedTask(FROM_HERE, {}, MakeExpectedRunClosure(FROM_HERE),
+ TimeDelta::FromSeconds(1));
+ task_environment.FastForwardBy(TimeDelta::FromSeconds(2));
+
+ EXPECT_EQ(task_environment.NowTicks(),
+ start_time + TimeDelta::FromSeconds(2));
+}
+
+// Verify that ThreadPoolExecutionMode::QUEUED doesn't prevent running tasks and
+// advancing time on the main thread.
+TEST_F(TaskEnvironmentTest, MultiThreadedMockTimeAndThreadPoolQueuedMode) {
+ TaskEnvironment task_environment(
+ TaskEnvironment::TimeSource::MOCK_TIME,
+ TaskEnvironment::ThreadPoolExecutionMode::QUEUED);
+
+ int count = 0;
+ const TimeTicks start_time = task_environment.NowTicks();
+
+ RunLoop run_loop;
+
+ // Neither of these should run automatically per
+ // ThreadPoolExecutionMode::QUEUED.
+ ThreadPool::PostTask(FROM_HERE, + BindLambdaForTesting([&]() { count += 128; })); + ThreadPool::PostDelayedTask(FROM_HERE, {}, + BindLambdaForTesting([&]() { count += 256; }), + TimeDelta::FromSeconds(5)); + + // Time should auto-advance to +500s in RunLoop::Run() without having to run + // the above forcefully QUEUED tasks. + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, BindLambdaForTesting([&]() { count += 1; })); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, + BindLambdaForTesting([&]() { + count += 2; + run_loop.Quit(); + }), + TimeDelta::FromSeconds(500)); + + int expected_value = 0; + EXPECT_EQ(expected_value, count); + run_loop.Run(); + expected_value += 1; + expected_value += 2; + EXPECT_EQ(expected_value, count); + EXPECT_EQ(task_environment.NowTicks() - start_time, + TimeDelta::FromSeconds(500)); + + // Fast-forward through all remaining tasks, this should unblock QUEUED tasks + // in the thread pool but shouldn't need to advance time to process them. + task_environment.FastForwardUntilNoTasksRemain(); + expected_value += 128; + expected_value += 256; + EXPECT_EQ(expected_value, count); + EXPECT_EQ(task_environment.NowTicks() - start_time, + TimeDelta::FromSeconds(500)); + + // Test advancing time to a QUEUED task in the future. + ThreadPool::PostDelayedTask(FROM_HERE, + BindLambdaForTesting([&]() { count += 512; }), + TimeDelta::FromSeconds(5)); + task_environment.FastForwardBy(TimeDelta::FromSeconds(7)); + expected_value += 512; + EXPECT_EQ(expected_value, count); + EXPECT_EQ(task_environment.NowTicks() - start_time, + TimeDelta::FromSeconds(507)); + + // Confirm that QUEUED mode is still active after the above fast forwarding + // (only the main thread task should run from RunLoop). + ThreadPool::PostTask(FROM_HERE, + BindLambdaForTesting([&]() { count += 1024; })); + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, BindLambdaForTesting([&]() { count += 2048; })); + PlatformThread::Sleep(TimeDelta::FromMilliseconds(1)); + RunLoop().RunUntilIdle(); + expected_value += 2048; + EXPECT_EQ(expected_value, count); + EXPECT_EQ(task_environment.NowTicks() - start_time, + TimeDelta::FromSeconds(507)); + + // Run the remaining task to avoid use-after-free on |count| from + // ~TaskEnvironment(). + task_environment.RunUntilIdle(); + expected_value += 1024; + EXPECT_EQ(expected_value, count); +} + +#if defined(OS_WIN) +// Regression test to ensure that TaskEnvironment enables the MTA in the +// thread pool (so that the test environment matches that of the browser process +// and com_init_util.h's assertions are happy in unit tests). +TEST_F(TaskEnvironmentTest, ThreadPoolPoolAllowsMTA) { + TaskEnvironment task_environment; + ThreadPool::PostTask(FROM_HERE, BindOnce(&win::AssertComApartmentType, + win::ComApartmentType::MTA)); + task_environment.RunUntilIdle(); +} +#endif // defined(OS_WIN) + +TEST_F(TaskEnvironmentTest, SetsDefaultRunTimeout) { + const RunLoop::RunLoopTimeout* old_run_timeout = + ScopedRunLoopTimeout::GetTimeoutForCurrentThread(); + + { + TaskEnvironment task_environment; + + // TaskEnvironment should set a default Run() timeout that fails the + // calling test (before test_launcher_timeout()). 
+ + const RunLoop::RunLoopTimeout* run_timeout = + ScopedRunLoopTimeout::GetTimeoutForCurrentThread(); + EXPECT_NE(run_timeout, old_run_timeout); + EXPECT_TRUE(run_timeout); + if (!debug::BeingDebugged()) { + EXPECT_LT(run_timeout->timeout, TestTimeouts::test_launcher_timeout()); + } + static const RepeatingClosure& static_on_timeout = run_timeout->on_timeout; + EXPECT_FATAL_FAILURE(static_on_timeout.Run(), "RunLoop::Run() timed out"); + } + + EXPECT_EQ(ScopedRunLoopTimeout::GetTimeoutForCurrentThread(), + old_run_timeout); +} + +TEST_F(TaskEnvironmentTest, DescribePendingMainThreadTasks) { + TaskEnvironment task_environment; + ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, DoNothing()); + + test::MockLog mock_log; + mock_log.StartCapturingLogs(); + + EXPECT_CALL(mock_log, Log(::logging::LOG_INFO, _, _, _, + HasSubstr("task_environment_unittest.cc"))) + .WillOnce(Return(true)); + task_environment.DescribePendingMainThreadTasks(); + + task_environment.RunUntilIdle(); + + EXPECT_CALL(mock_log, Log(::logging::LOG_INFO, _, _, _, + Not(HasSubstr("task_environment_unittest.cc")))) + .WillOnce(Return(true)); + task_environment.DescribePendingMainThreadTasks(); +} + +TEST_F(TaskEnvironmentTest, Basic) { + TaskEnvironment task_environment( + TaskEnvironment::TimeSource::MOCK_TIME, + TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + int counter = 0; + + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, + BindOnce([](int* counter) { *counter += 1; }, Unretained(&counter))); + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, + BindOnce([](int* counter) { *counter += 32; }, Unretained(&counter))); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce([](int* counter) { *counter += 256; }, Unretained(&counter)), + TimeDelta::FromSeconds(3)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce([](int* counter) { *counter += 64; }, Unretained(&counter)), + TimeDelta::FromSeconds(1)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce([](int* counter) { *counter += 1024; }, Unretained(&counter)), + TimeDelta::FromMinutes(20)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + BindOnce([](int* counter) { *counter += 4096; }, Unretained(&counter)), + TimeDelta::FromDays(20)); + + int expected_value = 0; + EXPECT_EQ(expected_value, counter); + task_environment.RunUntilIdle(); + expected_value += 1; + expected_value += 32; + EXPECT_EQ(expected_value, counter); + + task_environment.RunUntilIdle(); + EXPECT_EQ(expected_value, counter); + + task_environment.FastForwardBy(TimeDelta::FromSeconds(1)); + expected_value += 64; + EXPECT_EQ(expected_value, counter); + + task_environment.FastForwardBy(TimeDelta::FromSeconds(5)); + expected_value += 256; + EXPECT_EQ(expected_value, counter); + + task_environment.FastForwardUntilNoTasksRemain(); + expected_value += 1024; + expected_value += 4096; + EXPECT_EQ(expected_value, counter); +} + +TEST_F(TaskEnvironmentTest, RunLoopDriveable) { + TaskEnvironment task_environment( + TaskEnvironment::TimeSource::MOCK_TIME, + TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + int counter = 0; + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce([](int* counter) { *counter += 1; }, + Unretained(&counter))); + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce([](int* counter) { *counter += 32; }, + Unretained(&counter))); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 256; }, + 
Unretained(&counter)), + TimeDelta::FromSeconds(3)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 64; }, + Unretained(&counter)), + TimeDelta::FromSeconds(1)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 1024; }, + Unretained(&counter)), + TimeDelta::FromMinutes(20)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 4096; }, + Unretained(&counter)), + TimeDelta::FromDays(20)); + + int expected_value = 0; + EXPECT_EQ(expected_value, counter); + RunLoop().RunUntilIdle(); + expected_value += 1; + expected_value += 32; + EXPECT_EQ(expected_value, counter); + + RunLoop().RunUntilIdle(); + EXPECT_EQ(expected_value, counter); + + { + RunLoop run_loop; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, run_loop.QuitClosure(), TimeDelta::FromSeconds(1)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 8192; }, + Unretained(&counter)), + TimeDelta::FromSeconds(1)); + + // The QuitClosure() should be ordered between the 64 and the 8192 + // increments and should preempt the latter. + run_loop.Run(); + expected_value += 64; + EXPECT_EQ(expected_value, counter); + + // Running until idle should process the 8192 increment whose delay has + // expired in the previous Run(). + RunLoop().RunUntilIdle(); + expected_value += 8192; + EXPECT_EQ(expected_value, counter); + } + + { + RunLoop run_loop; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromSeconds(5)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 16384; }, + Unretained(&counter)), + TimeDelta::FromSeconds(5)); + + // The QuitWhenIdleClosure() shouldn't preempt equally delayed tasks and as + // such the 16384 increment should be processed before quitting. + run_loop.Run(); + expected_value += 256; + expected_value += 16384; + EXPECT_EQ(expected_value, counter); + } + + // Process the remaining tasks (note: do not mimic this elsewhere, + // TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() is a better API to + // do this, this is just done here for the purpose of extensively testing the + // RunLoop approach). + + // Disable Run() timeout here, otherwise we'll fast-forward to it before we + // reach the quit task. 
+ ScopedDisableRunLoopTimeout disable_timeout; + + RunLoop run_loop; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromDays(50)); + + run_loop.Run(); + expected_value += 1024; + expected_value += 4096; + EXPECT_EQ(expected_value, counter); +} + +TEST_F(TaskEnvironmentTest, CancelPendingTask) { + TaskEnvironment task_environment( + TaskEnvironment::TimeSource::MOCK_TIME, + TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + CancelableOnceClosure task1(BindOnce([]() {})); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, task1.callback(), + TimeDelta::FromSeconds(1)); + EXPECT_TRUE(task_environment.MainThreadIsIdle()); + EXPECT_EQ(1u, task_environment.GetPendingMainThreadTaskCount()); + EXPECT_EQ(TimeDelta::FromSeconds(1), + task_environment.NextMainThreadPendingTaskDelay()); + EXPECT_TRUE(task_environment.MainThreadIsIdle()); + task1.Cancel(); + EXPECT_TRUE(task_environment.MainThreadIsIdle()); + EXPECT_EQ(TimeDelta::Max(), + task_environment.NextMainThreadPendingTaskDelay()); + + CancelableClosure task2(BindRepeating([]() {})); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, task2.callback(), + TimeDelta::FromSeconds(1)); + task2.Cancel(); + EXPECT_EQ(0u, task_environment.GetPendingMainThreadTaskCount()); + + CancelableClosure task3(BindRepeating([]() {})); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, task3.callback(), + TimeDelta::FromSeconds(1)); + task3.Cancel(); + EXPECT_EQ(TimeDelta::Max(), + task_environment.NextMainThreadPendingTaskDelay()); + + CancelableClosure task4(BindRepeating([]() {})); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, task4.callback(), + TimeDelta::FromSeconds(1)); + task4.Cancel(); + EXPECT_TRUE(task_environment.MainThreadIsIdle()); +} + +TEST_F(TaskEnvironmentTest, CancelPendingImmediateTask) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + EXPECT_TRUE(task_environment.MainThreadIsIdle()); + + CancelableOnceClosure task1(BindOnce([]() {})); + ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, task1.callback()); + EXPECT_FALSE(task_environment.MainThreadIsIdle()); + + task1.Cancel(); + EXPECT_TRUE(task_environment.MainThreadIsIdle()); +} + +TEST_F(TaskEnvironmentTest, NoFastForwardToCancelledTask) { + TaskEnvironment task_environment( + TaskEnvironment::TimeSource::MOCK_TIME, + TaskEnvironment::ThreadPoolExecutionMode::QUEUED); + + TimeTicks start_time = task_environment.NowTicks(); + CancelableClosure task(BindRepeating([]() {})); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, task.callback(), + TimeDelta::FromSeconds(1)); + EXPECT_EQ(TimeDelta::FromSeconds(1), + task_environment.NextMainThreadPendingTaskDelay()); + task.Cancel(); + task_environment.FastForwardUntilNoTasksRemain(); + EXPECT_EQ(start_time, task_environment.NowTicks()); +} + +TEST_F(TaskEnvironmentTest, NextTaskIsDelayed) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + EXPECT_FALSE(task_environment.NextTaskIsDelayed()); + CancelableClosure task(BindRepeating([]() {})); + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, task.callback(), + TimeDelta::FromSeconds(1)); + EXPECT_TRUE(task_environment.NextTaskIsDelayed()); + task.Cancel(); + EXPECT_FALSE(task_environment.NextTaskIsDelayed()); + + ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, BindOnce([]() {}), + TimeDelta::FromSeconds(2)); + EXPECT_TRUE(task_environment.NextTaskIsDelayed()); + task_environment.FastForwardUntilNoTasksRemain(); 
+ EXPECT_FALSE(task_environment.NextTaskIsDelayed()); + + ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce([]() {})); + EXPECT_FALSE(task_environment.NextTaskIsDelayed()); +} + +TEST_F(TaskEnvironmentTest, NextMainThreadPendingTaskDelayWithImmediateTask) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + EXPECT_EQ(TimeDelta::Max(), + task_environment.NextMainThreadPendingTaskDelay()); + ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, BindOnce([]() {})); + EXPECT_EQ(TimeDelta(), task_environment.NextMainThreadPendingTaskDelay()); +} + +TEST_F(TaskEnvironmentTest, TimeSourceMockTimeAlsoMocksNow) { + TaskEnvironment task_environment(TaskEnvironment::TimeSource::MOCK_TIME); + + const TimeTicks start_ticks = task_environment.NowTicks(); + EXPECT_EQ(TimeTicks::Now(), start_ticks); + + const Time start_time = Time::Now(); + + constexpr TimeDelta kDelay = TimeDelta::FromSeconds(10); + task_environment.FastForwardBy(kDelay); + EXPECT_EQ(TimeTicks::Now(), start_ticks + kDelay); + EXPECT_EQ(Time::Now(), start_time + kDelay); +} + +TEST_F(TaskEnvironmentTest, SingleThread) { + SingleThreadTaskEnvironment task_environment; + EXPECT_THAT(ThreadPoolInstance::Get(), IsNull()); + + bool ran = false; + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindLambdaForTesting([&]() { ran = true; })); + RunLoop().RunUntilIdle(); + EXPECT_TRUE(ran); + + EXPECT_DCHECK_DEATH(ThreadPool::PostTask(FROM_HERE, {}, DoNothing())); +} + +// Verify that traits other than ThreadingMode can be applied to +// SingleThreadTaskEnvironment. +TEST_F(TaskEnvironmentTest, SingleThreadMockTime) { + SingleThreadTaskEnvironment task_environment( + TaskEnvironment::TimeSource::MOCK_TIME); + + const TimeTicks start_time = TimeTicks::Now(); + + constexpr TimeDelta kDelay = TimeDelta::FromSeconds(100); + + int counter = 0; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, base::BindLambdaForTesting([&]() { counter += 1; }), kDelay); + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindLambdaForTesting([&]() { counter += 2; })); + + int expected_value = 0; + EXPECT_EQ(expected_value, counter); + + task_environment.RunUntilIdle(); + expected_value += 2; + EXPECT_EQ(expected_value, counter); + + task_environment.FastForwardUntilNoTasksRemain(); + expected_value += 1; + EXPECT_EQ(expected_value, counter); + EXPECT_EQ(TimeTicks::Now(), start_time + kDelay); +} + +#if defined(OS_WIN) +namespace { + +enum class ApartmentType { + kSTA, + kMTA, +}; + +void InitializeSTAApartment() { + base::win::ScopedCOMInitializer initializer; + EXPECT_TRUE(initializer.Succeeded()); +} + +void InitializeMTAApartment() { + base::win::ScopedCOMInitializer initializer( + base::win::ScopedCOMInitializer::kMTA); + EXPECT_TRUE(initializer.Succeeded()); +} + +void InitializeCOMOnWorker( + TaskEnvironment::ThreadPoolCOMEnvironment com_environment, + ApartmentType apartment_type) { + TaskEnvironment task_environment(com_environment); + ThreadPool::PostTask(FROM_HERE, BindOnce(apartment_type == ApartmentType::kSTA + ? &InitializeSTAApartment + : &InitializeMTAApartment)); + task_environment.RunUntilIdle(); +} + +} // namespace + +TEST_F(TaskEnvironmentTest, DefaultCOMEnvironment) { + // Attempt to initialize an MTA COM apartment. Expect this to succeed since + // the thread is already in an MTA apartment. + InitializeCOMOnWorker(TaskEnvironment::ThreadPoolCOMEnvironment::DEFAULT, + ApartmentType::kMTA); + + // Attempt to initialize an STA COM apartment. 
Expect this to fail since the
+ // thread is already in an MTA apartment.
+ EXPECT_DCHECK_DEATH(InitializeCOMOnWorker(
+ TaskEnvironment::ThreadPoolCOMEnvironment::DEFAULT, ApartmentType::kSTA));
+}
+
+TEST_F(TaskEnvironmentTest, NoCOMEnvironment) {
+ // Attempt to initialize both MTA and STA COM apartments. Both should succeed
+ // when the thread is not already in an apartment.
+ InitializeCOMOnWorker(TaskEnvironment::ThreadPoolCOMEnvironment::NONE,
+ ApartmentType::kMTA);
+ InitializeCOMOnWorker(TaskEnvironment::ThreadPoolCOMEnvironment::NONE,
+ ApartmentType::kSTA);
+}
+#endif
+
+} // namespace test
+} // namespace base
diff --git a/chromium/base/test/task_runner_test_template.cc b/chromium/base/test/task_runner_test_template.cc
new file mode 100644
index 00000000000..e92e8467c6c
--- /dev/null
+++ b/chromium/base/test/task_runner_test_template.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/task_runner_test_template.h"
+
+namespace base {
+
+namespace test {
+
+TaskTracker::TaskTracker() : task_runs_(0), task_runs_cv_(&lock_) {}
+
+TaskTracker::~TaskTracker() = default;
+
+RepeatingClosure TaskTracker::WrapTask(RepeatingClosure task, int i) {
+ return BindRepeating(&TaskTracker::RunTask, this, std::move(task), i);
+}
+
+void TaskTracker::RunTask(RepeatingClosure task, int i) {
+ AutoLock lock(lock_);
+ if (!task.is_null()) {
+ task.Run();
+ }
+ ++task_run_counts_[i];
+ ++task_runs_;
+ task_runs_cv_.Signal();
+}
+
+std::map<int, int> TaskTracker::GetTaskRunCounts() const {
+ AutoLock lock(lock_);
+ return task_run_counts_;
+}
+
+void TaskTracker::WaitForCompletedTasks(int count) {
+ AutoLock lock(lock_);
+ while (task_runs_ < count)
+ task_runs_cv_.Wait();
+}
+
+} // namespace test
+
+} // namespace base
diff --git a/chromium/base/test/task_runner_test_template.h b/chromium/base/test/task_runner_test_template.h
new file mode 100644
index 00000000000..37acde16686
--- /dev/null
+++ b/chromium/base/test/task_runner_test_template.h
@@ -0,0 +1,170 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines tests that implementations of TaskRunner should
+// pass in order to be conformant, as well as test cases for optional behavior.
+// Here's how you use it to test your implementation.
+//
+// Say your class is called MyTaskRunner. Then you need to define a
+// class called MyTaskRunnerTestDelegate in my_task_runner_unittest.cc
+// like this:
+//
+// class MyTaskRunnerTestDelegate {
+// public:
+// // Tasks posted to the task runner after this and before
+// // StopTaskRunner() is called should run successfully.
+// void StartTaskRunner() {
+// ...
+// }
+//
+// // Should return the task runner implementation. Only called
+// // after StartTaskRunner and before StopTaskRunner.
+// scoped_refptr<MyTaskRunner> GetTaskRunner() {
+// ...
+// }
+//
+// // Stop the task runner and make sure all tasks posted before
+// // this is called are run. Caveat: delayed tasks are not run,
+// // they're simply deleted.
+// void StopTaskRunner() {
+// ...
+// }
+// };
+//
+// The TaskRunnerTest test harness will have a member variable of
+// this delegate type and will call its functions in the various
+// tests.
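+//
+// For illustration only: if MyTaskRunner were simply backed by a base::Thread,
+// its delegate might look roughly like this (a sketch, not prescribed by this
+// header):
+//
+// class MyTaskRunnerTestDelegate {
+// public:
+// void StartTaskRunner() { CHECK(thread_.Start()); }
+// scoped_refptr<SingleThreadTaskRunner> GetTaskRunner() {
+// return thread_.task_runner();
+// }
+// void StopTaskRunner() { thread_.Stop(); }
+//
+// private:
+// Thread thread_{"MyTaskRunner"};
+// };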
+// +// Then you simply #include this file as well as gtest.h and add the +// following statement to my_task_runner_unittest.cc: +// +// INSTANTIATE_TYPED_TEST_SUITE_P( +// MyTaskRunner, TaskRunnerTest, MyTaskRunnerTestDelegate); +// +// Easy! + +#ifndef BASE_TEST_TASK_RUNNER_TEST_TEMPLATE_H_ +#define BASE_TEST_TASK_RUNNER_TEST_TEMPLATE_H_ + +#include <cstddef> +#include <map> + +#include "base/bind.h" +#include "base/callback.h" +#include "base/location.h" +#include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/single_thread_task_runner.h" +#include "base/synchronization/condition_variable.h" +#include "base/synchronization/lock.h" +#include "base/task_runner.h" +#include "base/threading/thread.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +namespace test { + +// Utility class that keeps track of how many times particular tasks +// are run. +class TaskTracker : public RefCountedThreadSafe<TaskTracker> { + public: + TaskTracker(); + + // Returns a closure that runs the given task and increments the run + // count of |i| by one. |task| may be null. It is guaranteed that + // only one task wrapped by a given tracker will be run at a time. + RepeatingClosure WrapTask(RepeatingClosure task, int i); + + std::map<int, int> GetTaskRunCounts() const; + + // Returns after the tracker observes a total of |count| task completions. + void WaitForCompletedTasks(int count); + + private: + friend class RefCountedThreadSafe<TaskTracker>; + + ~TaskTracker(); + + void RunTask(RepeatingClosure task, int i); + + mutable Lock lock_; + std::map<int, int> task_run_counts_; + int task_runs_; + ConditionVariable task_runs_cv_; + + DISALLOW_COPY_AND_ASSIGN(TaskTracker); +}; + +} // namespace test + +template <typename TaskRunnerTestDelegate> +class TaskRunnerTest : public testing::Test { + protected: + TaskRunnerTest() : task_tracker_(base::MakeRefCounted<test::TaskTracker>()) {} + + const scoped_refptr<test::TaskTracker> task_tracker_; + TaskRunnerTestDelegate delegate_; +}; + +TYPED_TEST_SUITE_P(TaskRunnerTest); + +// We can't really test much, since TaskRunner provides very few +// guarantees. + +// Post a bunch of tasks to the task runner. They should all +// complete. +TYPED_TEST_P(TaskRunnerTest, Basic) { + std::map<int, int> expected_task_run_counts; + + this->delegate_.StartTaskRunner(); + scoped_refptr<TaskRunner> task_runner = this->delegate_.GetTaskRunner(); + // Post each ith task i+1 times. + for (int i = 0; i < 20; ++i) { + RepeatingClosure ith_task = + this->task_tracker_->WrapTask(RepeatingClosure(), i); + for (int j = 0; j < i + 1; ++j) { + task_runner->PostTask(FROM_HERE, ith_task); + ++expected_task_run_counts[i]; + } + } + this->delegate_.StopTaskRunner(); + + EXPECT_EQ(expected_task_run_counts, + this->task_tracker_->GetTaskRunCounts()); +} + +// Post a bunch of delayed tasks to the task runner. They should all +// complete. +TYPED_TEST_P(TaskRunnerTest, Delayed) { + std::map<int, int> expected_task_run_counts; + int expected_total_tasks = 0; + + this->delegate_.StartTaskRunner(); + scoped_refptr<TaskRunner> task_runner = this->delegate_.GetTaskRunner(); + // Post each ith task i+1 times with delays from 0-i. 
+ for (int i = 0; i < 20; ++i) {
+ RepeatingClosure ith_task =
+ this->task_tracker_->WrapTask(RepeatingClosure(), i);
+ for (int j = 0; j < i + 1; ++j) {
+ task_runner->PostDelayedTask(
+ FROM_HERE, ith_task, base::TimeDelta::FromMilliseconds(j));
+ ++expected_task_run_counts[i];
+ ++expected_total_tasks;
+ }
+ }
+ this->task_tracker_->WaitForCompletedTasks(expected_total_tasks);
+ this->delegate_.StopTaskRunner();
+
+ EXPECT_EQ(expected_task_run_counts,
+ this->task_tracker_->GetTaskRunCounts());
+}
+
+// The TaskRunnerTest test case verifies behavior that is expected from a
+// task runner in order to be conformant.
+REGISTER_TYPED_TEST_SUITE_P(TaskRunnerTest, Basic, Delayed);
+
+} // namespace base
+
+#endif // BASE_TEST_TASK_RUNNER_TEST_TEMPLATE_H_
diff --git a/chromium/base/test/test_child_process.cc b/chromium/base/test/test_child_process.cc
new file mode 100644
index 00000000000..ce158561163
--- /dev/null
+++ b/chromium/base/test/test_child_process.cc
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Simple testing command, used to exercise child process launcher calls.
+//
+// Usage:
+// echo_test_helper [-x exit_code] arg0 arg1 arg2...
+// Prints arg0..n to stdout with space delimiters between args,
+// returning "exit_code" if -x is specified.
+//
+// echo_test_helper -e env_var
+// Prints the environment variable |env_var| to stdout.
+int main(int argc, char** argv) {
+ if (strcmp(argv[1], "-e") == 0) {
+ if (argc != 3) {
+ return 1;
+ }
+
+ const char* env = getenv(argv[2]);
+ if (env != NULL) {
+ printf("%s", env);
+ }
+ } else {
+ int return_code = 0;
+ int start_idx = 1;
+
+ if (strcmp(argv[1], "-x") == 0) {
+ return_code = atoi(argv[2]);
+ start_idx = 3;
+ }
+
+ for (int i = start_idx; i < argc; ++i) {
+ printf((i < argc - 1 ? "%s " : "%s"), argv[i]);
+ }
+
+ return return_code;
+ }
+}
diff --git a/chromium/base/test/test_discardable_memory_allocator.cc b/chromium/base/test/test_discardable_memory_allocator.cc
new file mode 100644
index 00000000000..26bc5b036a8
--- /dev/null
+++ b/chromium/base/test/test_discardable_memory_allocator.cc
@@ -0,0 +1,67 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_discardable_memory_allocator.h"
+
+#include <cstdint>
+#include <cstring>
+
+#include "base/check.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/ptr_util.h"
+
+namespace base {
+namespace {
+
+class DiscardableMemoryImpl : public DiscardableMemory {
+ public:
+ explicit DiscardableMemoryImpl(size_t size)
+ : data_(new uint8_t[size]), size_(size) {}
+
+ // Overridden from DiscardableMemory:
+ bool Lock() override {
+ DCHECK(!is_locked_);
+ is_locked_ = true;
+ return false;
+ }
+
+ void Unlock() override {
+ DCHECK(is_locked_);
+ is_locked_ = false;
+ // Force eviction to catch clients not correctly checking the return value
+ // of Lock().
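+ // For example (illustrative only), a buggy client such as
+ //
+ //   memory->Lock();  // Return value ignored; contents were purged.
+ //   UseData(memory->data());
+ //
+ // reads zeroed data instead of silently seeing stale contents.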
+ memset(data_.get(), 0, size_); + } + + void* data() const override { + DCHECK(is_locked_); + return data_.get(); + } + + void DiscardForTesting() override {} + + trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump( + const char* name, + trace_event::ProcessMemoryDump* pmd) const override { + return nullptr; + } + + private: + bool is_locked_ = true; + std::unique_ptr<uint8_t[]> data_; + size_t size_; +}; + +} // namespace + +std::unique_ptr<DiscardableMemory> +TestDiscardableMemoryAllocator::AllocateLockedDiscardableMemory(size_t size) { + return std::make_unique<DiscardableMemoryImpl>(size); +} + +size_t TestDiscardableMemoryAllocator::GetBytesAllocated() const { + return 0U; +} + +} // namespace base diff --git a/chromium/base/test/test_discardable_memory_allocator.h b/chromium/base/test/test_discardable_memory_allocator.h new file mode 100644 index 00000000000..ba9719ada4a --- /dev/null +++ b/chromium/base/test/test_discardable_memory_allocator.h @@ -0,0 +1,38 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_DISCARDABLE_MEMORY_ALLOCATOR_H_ +#define BASE_TEST_TEST_DISCARDABLE_MEMORY_ALLOCATOR_H_ + +#include <stddef.h> + +#include "base/macros.h" +#include "base/memory/discardable_memory_allocator.h" + +namespace base { + +// TestDiscardableMemoryAllocator is a simple DiscardableMemoryAllocator +// implementation that can be used for testing. It allocates one-shot +// DiscardableMemory instances backed by heap memory. +class TestDiscardableMemoryAllocator : public DiscardableMemoryAllocator { + public: + TestDiscardableMemoryAllocator() = default; + + // Overridden from DiscardableMemoryAllocator: + std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory( + size_t size) override; + + size_t GetBytesAllocated() const override; + + void ReleaseFreeMemory() override { + // Do nothing since it is backed by heap memory. + } + + private: + DISALLOW_COPY_AND_ASSIGN(TestDiscardableMemoryAllocator); +}; + +} // namespace base + +#endif // BASE_TEST_TEST_DISCARDABLE_MEMORY_ALLOCATOR_H_ diff --git a/chromium/base/test/test_file_util.cc b/chromium/base/test/test_file_util.cc new file mode 100644 index 00000000000..8dafc58a7df --- /dev/null +++ b/chromium/base/test/test_file_util.cc @@ -0,0 +1,23 @@ +// Copyright (c) 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_file_util.h" + +#include "base/test/test_timeouts.h" +#include "base/threading/platform_thread.h" + +namespace base { + +bool EvictFileFromSystemCacheWithRetry(const FilePath& path) { + const int kCycles = 10; + const TimeDelta kDelay = TestTimeouts::action_timeout() / kCycles; + for (int i = 0; i < kCycles; i++) { + if (EvictFileFromSystemCache(path)) + return true; + PlatformThread::Sleep(kDelay); + } + return false; +} + +} // namespace base diff --git a/chromium/base/test/test_file_util.h b/chromium/base/test/test_file_util.h new file mode 100644 index 00000000000..f9951b0fb14 --- /dev/null +++ b/chromium/base/test/test_file_util.h @@ -0,0 +1,85 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_FILE_UTIL_H_ +#define BASE_TEST_TEST_FILE_UTIL_H_ + +// File utility functions used only by tests. 
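+//
+// Illustrative use in a test body (sketch only; the helpers referenced here
+// are declared below in this header):
+//
+//   {
+//     base::FilePermissionRestorer restore_permissions(path);
+//     ASSERT_TRUE(base::MakeFileUnreadable(path));
+//     // ... exercise code that must cope with an unreadable file ...
+//   }
+//
+//   // Force a cold read from disk before timing file access.
+//   ASSERT_TRUE(base::EvictFileFromSystemCacheWithRetry(path));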
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/files/file_path.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <jni.h>
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+namespace base {
+
+// Clear a specific file from the system cache like EvictFileFromSystemCache,
+// but on failure it will sleep and retry. On the Windows buildbots, eviction
+// can fail if the file is marked in use, and this will throw off timings that
+// rely on uncached files.
+bool EvictFileFromSystemCacheWithRetry(const FilePath& file);
+
+// Wrapper over base::Delete. On Windows repeatedly invokes Delete in case
+// of failure to work around Windows file locking semantics. Returns true on
+// success.
+bool DieFileDie(const FilePath& file, bool recurse);
+
+// Synchronize all the dirty pages from the page cache to disk (on POSIX
+// systems). The Windows analogy for this operation is to 'Flush file buffers'.
+// Note: This is currently implemented as a no-op on Windows.
+void SyncPageCacheToDisk();
+
+// Clear a specific file from the system cache. After this call, trying
+// to access this file will result in a cold load from the hard drive.
+bool EvictFileFromSystemCache(const FilePath& file);
+
+#if defined(OS_WIN)
+// Deny |permission| on the file |path| for the current user. |permission| is an
+// ACCESS_MASK structure which is defined in
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374892.aspx
+// Refer to https://msdn.microsoft.com/en-us/library/aa822867.aspx for a list of
+// possible values.
+bool DenyFilePermission(const FilePath& path, DWORD permission);
+#endif // defined(OS_WIN)
+
+// For testing, make the file unreadable or unwritable.
+// In POSIX, this does not apply to the root user.
+bool MakeFileUnreadable(const FilePath& path) WARN_UNUSED_RESULT;
+bool MakeFileUnwritable(const FilePath& path) WARN_UNUSED_RESULT;
+
+// Saves the current permissions for a path, and restores them on destruction.
+class FilePermissionRestorer {
+ public:
+ explicit FilePermissionRestorer(const FilePath& path);
+ ~FilePermissionRestorer();
+
+ private:
+ const FilePath path_;
+ void* info_; // The opaque stored permission information.
+ size_t length_; // The length of the stored permission information.
+
+ DISALLOW_COPY_AND_ASSIGN(FilePermissionRestorer);
+};
+
+#if defined(OS_ANDROID)
+// Insert an image file into the MediaStore, and retrieve the content URI for
+// testing purposes.
+FilePath InsertImageIntoMediaStore(const FilePath& path);
+#endif // defined(OS_ANDROID)
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_FILE_UTIL_H_
diff --git a/chromium/base/test/test_file_util_android.cc b/chromium/base/test/test_file_util_android.cc
new file mode 100644
index 00000000000..fc0beb3faa8
--- /dev/null
+++ b/chromium/base/test/test_file_util_android.cc
@@ -0,0 +1,26 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +#include "base/test/test_file_util.h" + +#include "base/android/jni_android.h" +#include "base/android/jni_string.h" +#include "base/files/file_path.h" +#include "base/test/base_unittests_jni_headers/ContentUriTestUtils_jni.h" + +using base::android::ScopedJavaLocalRef; + +namespace base { + +FilePath InsertImageIntoMediaStore(const FilePath& path) { + JNIEnv* env = base::android::AttachCurrentThread(); + ScopedJavaLocalRef<jstring> j_path = + base::android::ConvertUTF8ToJavaString(env, path.value()); + ScopedJavaLocalRef<jstring> j_uri = + Java_ContentUriTestUtils_insertImageIntoMediaStore(env, j_path); + std::string uri = base::android::ConvertJavaStringToUTF8(j_uri); + return FilePath(uri); +} + +} // namespace base diff --git a/chromium/base/test/test_file_util_linux.cc b/chromium/base/test/test_file_util_linux.cc new file mode 100644 index 00000000000..85fb69fdfbe --- /dev/null +++ b/chromium/base/test/test_file_util_linux.cc @@ -0,0 +1,60 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_file_util.h" + +#include <fcntl.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> + +#if defined(OS_ANDROID) +#include <asm/unistd.h> +#include <errno.h> +#include <linux/fadvise.h> +#include <sys/syscall.h> +#endif + +#include "base/files/file_path.h" +#include "base/files/scoped_file.h" +#include "base/notreached.h" + +namespace base { + +// Inconveniently, the NDK doesn't provide for posix_fadvise +// until native API level = 21, which we don't use yet, so provide a wrapper, at +// least on ARM32 +#if defined(OS_ANDROID) && __ANDROID_API__ < 21 + +namespace { +int posix_fadvise(int fd, off_t offset, off_t len, int advice) { +#if defined(ARCH_CPU_ARMEL) + // Note that the syscall argument order on ARM is different from the C + // function; this is helpfully documented in the Linux posix_fadvise manpage. + return syscall(__NR_arm_fadvise64_64, fd, advice, + 0, // Upper 32-bits for offset + offset, + 0, // Upper 32-bits for length + len); +#endif + NOTIMPLEMENTED(); + return ENOSYS; +} + +} // namespace + +#endif // OS_ANDROID + +bool EvictFileFromSystemCache(const FilePath& file) { + ScopedFD fd(open(file.value().c_str(), O_RDONLY)); + if (!fd.is_valid()) + return false; + if (fdatasync(fd.get()) != 0) + return false; + if (posix_fadvise(fd.get(), 0, 0, POSIX_FADV_DONTNEED) != 0) + return false; + return true; +} + +} // namespace base diff --git a/chromium/base/test/test_file_util_mac.cc b/chromium/base/test/test_file_util_mac.cc new file mode 100644 index 00000000000..174a31db254 --- /dev/null +++ b/chromium/base/test/test_file_util_mac.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_file_util.h" + +#include <sys/mman.h> +#include <errno.h> +#include <stdint.h> + +#include "base/files/file_util.h" +#include "base/files/memory_mapped_file.h" +#include "base/logging.h" + +namespace base { + +bool EvictFileFromSystemCache(const FilePath& file) { + // There aren't any really direct ways to purge a file from the UBC. From + // talking with Amit Singh, the safest is to mmap the file with MAP_FILE (the + // default) + MAP_SHARED, then do an msync to invalidate the memory. The next + // open should then have to load the file from disk. 
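+ //
+ // In raw POSIX terms this is roughly (sketch only, error handling omitted;
+ // the MemoryMappedFile helper below performs the mapping for us):
+ //
+ //   void* map = mmap(nullptr, length, PROT_READ, MAP_FILE | MAP_SHARED, fd, 0);
+ //   msync(map, length, MS_INVALIDATE);
+ //   munmap(map, length);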
+
+ int64_t length;
+ if (!GetFileSize(file, &length)) {
+ DLOG(ERROR) << "failed to get size of " << file.value();
+ return false;
+ }
+
+ // When a file is empty, we do not need to evict it from cache.
+ // In fact, an attempt to map it to memory will result in an error.
+ if (length == 0) {
+ DLOG(WARNING) << "file size is zero, will not attempt to map to memory";
+ return true;
+ }
+
+ MemoryMappedFile mapped_file;
+ if (!mapped_file.Initialize(file)) {
+ DLOG(WARNING) << "failed to memory map " << file.value();
+ return false;
+ }
+
+ if (msync(const_cast<uint8_t*>(mapped_file.data()), mapped_file.length(),
+ MS_INVALIDATE) != 0) {
+ DLOG(WARNING) << "failed to invalidate memory map of " << file.value()
+ << ", errno: " << errno;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace base
diff --git a/chromium/base/test/test_file_util_posix.cc b/chromium/base/test/test_file_util_posix.cc
new file mode 100644
index 00000000000..02c214a4214
--- /dev/null
+++ b/chromium/base/test/test_file_util_posix.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_file_util.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "base/check_op.h"
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/notreached.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Deny |permission| on the file |path|.
+bool DenyFilePermission(const FilePath& path, mode_t permission) {
+ stat_wrapper_t stat_buf;
+ if (File::Stat(path.value().c_str(), &stat_buf) != 0)
+ return false;
+ stat_buf.st_mode &= ~permission;
+
+ int rv = HANDLE_EINTR(chmod(path.value().c_str(), stat_buf.st_mode));
+ return rv == 0;
+}
+
+// Gets a blob indicating the permission information for |path|.
+// |length| is the length of the blob. Zero on failure.
+// Returns the blob pointer, or NULL on failure.
+void* GetPermissionInfo(const FilePath& path, size_t* length) {
+ DCHECK(length);
+ *length = 0;
+
+ stat_wrapper_t stat_buf;
+ if (File::Stat(path.value().c_str(), &stat_buf) != 0)
+ return nullptr;
+
+ *length = sizeof(mode_t);
+ mode_t* mode = new mode_t;
+ *mode = stat_buf.st_mode & ~S_IFMT; // Filter out file/path kind.
+
+ return mode;
+}
+
+// Restores the permission information for |path|, given the blob retrieved
+// using |GetPermissionInfo()|.
+// |info| is the pointer to the blob.
+// |length| is the length of the blob.
+// Either |info| or |length| may be NULL/0, in which case nothing happens.
+bool RestorePermissionInfo(const FilePath& path, void* info, size_t length) {
+ if (!info || (length == 0))
+ return false;
+
+ DCHECK_EQ(sizeof(mode_t), length);
+ mode_t* mode = reinterpret_cast<mode_t*>(info);
+
+ int rv = HANDLE_EINTR(chmod(path.value().c_str(), *mode));
+
+ delete mode;
+
+ return rv == 0;
+}
+
+} // namespace
+
+bool DieFileDie(const FilePath& file, bool recurse) {
+ // There is no need to work around Windows problems on POSIX.
+ // Just pass through.
+ return DeleteFile(file, recurse);
+}
+
+void SyncPageCacheToDisk() {
+ // On Linux (and Android) the sync(2) call waits for I/O completions.
+ sync(); +} + +#if !defined(OS_LINUX) && !defined(OS_MACOSX) && !defined(OS_ANDROID) +bool EvictFileFromSystemCache(const FilePath& file) { + // There doesn't seem to be a POSIX way to cool the disk cache. + NOTIMPLEMENTED(); + return false; +} +#endif + +bool MakeFileUnreadable(const FilePath& path) { + return DenyFilePermission(path, S_IRUSR | S_IRGRP | S_IROTH); +} + +bool MakeFileUnwritable(const FilePath& path) { + return DenyFilePermission(path, S_IWUSR | S_IWGRP | S_IWOTH); +} + +FilePermissionRestorer::FilePermissionRestorer(const FilePath& path) + : path_(path), info_(nullptr), length_(0) { + info_ = GetPermissionInfo(path_, &length_); + DCHECK(info_ != nullptr); + DCHECK_NE(0u, length_); +} + +FilePermissionRestorer::~FilePermissionRestorer() { + if (!RestorePermissionInfo(path_, info_, length_)) + NOTREACHED(); +} + +} // namespace base diff --git a/chromium/base/test/test_file_util_win.cc b/chromium/base/test/test_file_util_win.cc new file mode 100644 index 00000000000..d2a861b23aa --- /dev/null +++ b/chromium/base/test/test_file_util_win.cc @@ -0,0 +1,189 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_file_util.h" + +#include <aclapi.h> +#include <stddef.h> +#include <wchar.h> +#include <windows.h> + +#include <memory> +#include <vector> + +#include "base/check_op.h" +#include "base/files/file_path.h" +#include "base/files/file_util.h" +#include "base/memory/ptr_util.h" +#include "base/notreached.h" +#include "base/strings/string_split.h" +#include "base/strings/string_util.h" +#include "base/threading/platform_thread.h" +#include "base/win/scoped_handle.h" +#include "base/win/shlwapi.h" + +namespace base { + +namespace { + +struct PermissionInfo { + PSECURITY_DESCRIPTOR security_descriptor; + ACL dacl; +}; + +// Gets a blob indicating the permission information for |path|. +// |length| is the length of the blob. Zero on failure. +// Returns the blob pointer, or NULL on failure. +void* GetPermissionInfo(const FilePath& path, size_t* length) { + DCHECK(length != NULL); + *length = 0; + PACL dacl = NULL; + PSECURITY_DESCRIPTOR security_descriptor; + if (GetNamedSecurityInfo(path.value().c_str(), SE_FILE_OBJECT, + DACL_SECURITY_INFORMATION, NULL, NULL, &dacl, NULL, + &security_descriptor) != ERROR_SUCCESS) { + return NULL; + } + DCHECK(dacl != NULL); + + *length = sizeof(PSECURITY_DESCRIPTOR) + dacl->AclSize; + PermissionInfo* info = reinterpret_cast<PermissionInfo*>(new char[*length]); + info->security_descriptor = security_descriptor; + memcpy(&info->dacl, dacl, dacl->AclSize); + + return info; +} + +// Restores the permission information for |path|, given the blob retrieved +// using |GetPermissionInfo()|. +// |info| is the pointer to the blob. +// |length| is the length of the blob. +// Either |info| or |length| may be NULL/0, in which case nothing happens. 
+bool RestorePermissionInfo(const FilePath& path, void* info, size_t length) { + if (!info || !length) + return false; + + PermissionInfo* perm = reinterpret_cast<PermissionInfo*>(info); + + DWORD rc = SetNamedSecurityInfo(const_cast<wchar_t*>(path.value().c_str()), + SE_FILE_OBJECT, DACL_SECURITY_INFORMATION, + NULL, NULL, &perm->dacl, NULL); + LocalFree(perm->security_descriptor); + + char* char_array = reinterpret_cast<char*>(info); + delete [] char_array; + + return rc == ERROR_SUCCESS; +} + +std::unique_ptr<wchar_t[]> ToCStr(const std::basic_string<wchar_t>& str) { + size_t size = str.size() + 1; + std::unique_ptr<wchar_t[]> ptr = std::make_unique<wchar_t[]>(size); + wcsncpy(ptr.get(), str.c_str(), size); + return ptr; +} + +} // namespace + +bool DieFileDie(const FilePath& file, bool recurse) { + // It turns out that to not induce flakiness a long timeout is needed. + const int kIterations = 25; + const TimeDelta kTimeout = TimeDelta::FromSeconds(10) / kIterations; + + if (!PathExists(file)) + return true; + + // Sometimes Delete fails, so try a few more times. Divide the timeout + // into short chunks, so that if a try succeeds, we won't delay the test + // for too long. + for (int i = 0; i < kIterations; ++i) { + if (DeleteFile(file, recurse)) + return true; + PlatformThread::Sleep(kTimeout); + } + return false; +} + +void SyncPageCacheToDisk() { + // Approximating this with noop. The proper implementation would require + // administrator privilege: + // https://docs.microsoft.com/en-us/windows/desktop/api/FileAPI/nf-fileapi-flushfilebuffers +} + +bool EvictFileFromSystemCache(const FilePath& file) { + win::ScopedHandle file_handle( + CreateFile(file.value().c_str(), GENERIC_READ | GENERIC_WRITE, 0, NULL, + OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, NULL)); + if (!file_handle.IsValid()) + return false; + + // Re-write the file time information to trigger cache eviction for the file. + // This function previously overwrote the entire file without buffering, but + // local experimentation validates this simplified and *much* faster approach: + // [1] Sysinternals RamMap no longer lists these files as cached afterwards. + // [2] Telemetry performance test startup.cold.blank_page reports sane values. + BY_HANDLE_FILE_INFORMATION bhi = {0}; + CHECK(::GetFileInformationByHandle(file_handle.Get(), &bhi)); + CHECK(::SetFileTime(file_handle.Get(), &bhi.ftCreationTime, + &bhi.ftLastAccessTime, &bhi.ftLastWriteTime)); + return true; +} + +// Deny |permission| on the file |path|, for the current user. 
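+// For example, MakeFileUnwritable() below is simply
+// DenyFilePermission(path, GENERIC_WRITE).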
+bool DenyFilePermission(const FilePath& path, DWORD permission) { + PACL old_dacl; + PSECURITY_DESCRIPTOR security_descriptor; + + std::unique_ptr<TCHAR[]> path_ptr = ToCStr(path.value().c_str()); + if (GetNamedSecurityInfo(path_ptr.get(), SE_FILE_OBJECT, + DACL_SECURITY_INFORMATION, nullptr, nullptr, + &old_dacl, nullptr, + &security_descriptor) != ERROR_SUCCESS) { + return false; + } + + std::unique_ptr<TCHAR[]> current_user = ToCStr(std::wstring(L"CURRENT_USER")); + EXPLICIT_ACCESS new_access = { + permission, + DENY_ACCESS, + 0, + {nullptr, NO_MULTIPLE_TRUSTEE, TRUSTEE_IS_NAME, TRUSTEE_IS_USER, + current_user.get()}}; + + PACL new_dacl; + if (SetEntriesInAcl(1, &new_access, old_dacl, &new_dacl) != ERROR_SUCCESS) { + LocalFree(security_descriptor); + return false; + } + + DWORD rc = SetNamedSecurityInfo(path_ptr.get(), SE_FILE_OBJECT, + DACL_SECURITY_INFORMATION, nullptr, nullptr, + new_dacl, nullptr); + LocalFree(security_descriptor); + LocalFree(new_dacl); + + return rc == ERROR_SUCCESS; +} + +bool MakeFileUnreadable(const FilePath& path) { + return DenyFilePermission(path, GENERIC_READ); +} + +bool MakeFileUnwritable(const FilePath& path) { + return DenyFilePermission(path, GENERIC_WRITE); +} + +FilePermissionRestorer::FilePermissionRestorer(const FilePath& path) + : path_(path), info_(NULL), length_(0) { + info_ = GetPermissionInfo(path_, &length_); + DCHECK(info_ != NULL); + DCHECK_NE(0u, length_); +} + +FilePermissionRestorer::~FilePermissionRestorer() { + if (!RestorePermissionInfo(path_, info_, length_)) + NOTREACHED(); +} + +} // namespace base diff --git a/chromium/base/test/test_io_thread.cc b/chromium/base/test/test_io_thread.cc new file mode 100644 index 00000000000..15e4fe015c6 --- /dev/null +++ b/chromium/base/test/test_io_thread.cc @@ -0,0 +1,45 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_io_thread.h" + +#include "base/check.h" +#include "base/message_loop/message_pump_type.h" + +namespace base { + +TestIOThread::TestIOThread(Mode mode) + : io_thread_("test_io_thread"), io_thread_started_(false) { + switch (mode) { + case kAutoStart: + Start(); + return; + case kManualStart: + return; + } + CHECK(false) << "Invalid mode"; +} + +TestIOThread::~TestIOThread() { + Stop(); +} + +void TestIOThread::Start() { + CHECK(!io_thread_started_); + io_thread_started_ = true; + CHECK(io_thread_.StartWithOptions( + base::Thread::Options(base::MessagePumpType::IO, 0))); +} + +void TestIOThread::Stop() { + // Note: It's okay to call |Stop()| even if the thread isn't running. + io_thread_.Stop(); + io_thread_started_ = false; +} + +void TestIOThread::PostTask(const Location& from_here, base::OnceClosure task) { + task_runner()->PostTask(from_here, std::move(task)); +} + +} // namespace base diff --git a/chromium/base/test/test_io_thread.h b/chromium/base/test/test_io_thread.h new file mode 100644 index 00000000000..c72d4fa509c --- /dev/null +++ b/chromium/base/test/test_io_thread.h @@ -0,0 +1,56 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+
+#ifndef BASE_TEST_TEST_IO_THREAD_H_
+#define BASE_TEST_TEST_IO_THREAD_H_
+
+#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+
+namespace base {
+
+// Creates and runs an IO thread with a MessageLoop, and makes the
+// MessageLoop accessible from its client. It also provides an idiomatic
+// API such as PostTask().
+//
+// This API is not thread-safe:
+//   - Start()/Stop() should only be called from the main (creation) thread.
+//   - PostTask()/task_runner() are also safe to call from the
+//     underlying thread itself (to post tasks from other threads: get the
+//     task_runner() from the main thread first; it is then safe to pass _it_
+//     around).
+class TestIOThread {
+ public:
+  enum Mode { kAutoStart, kManualStart };
+  explicit TestIOThread(Mode mode);
+  // Stops the I/O thread if necessary.
+  ~TestIOThread();
+
+  // After Stop(), Start() may be called again to start a new I/O thread.
+  // Stop() may be called even when the I/O thread is not started.
+  void Start();
+  void Stop();
+
+  // Post |task| to the IO thread.
+  void PostTask(const Location& from_here, base::OnceClosure task);
+
+  scoped_refptr<SingleThreadTaskRunner> task_runner() {
+    return io_thread_.task_runner();
+  }
+
+ private:
+  base::Thread io_thread_;
+  bool io_thread_started_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestIOThread);
+};
+
+} // namespace base
+
+#endif // BASE_TEST_TEST_IO_THREAD_H_
diff --git a/chromium/base/test/test_listener_ios.h b/chromium/base/test/test_listener_ios.h
new file mode 100644
index 00000000000..c312250065d
--- /dev/null
+++ b/chromium/base/test/test_listener_ios.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_TEST_LISTENER_IOS_H_
+#define BASE_TEST_TEST_LISTENER_IOS_H_
+
+namespace base {
+namespace test_listener_ios {
+
+// Register an IOSRunLoopListener.
+void RegisterTestEndListener();
+
+} // namespace test_listener_ios
+} // namespace base
+
+#endif // BASE_TEST_TEST_LISTENER_IOS_H_
diff --git a/chromium/base/test/test_listener_ios.mm b/chromium/base/test/test_listener_ios.mm
new file mode 100644
index 00000000000..54aa9acb6d0
--- /dev/null
+++ b/chromium/base/test/test_listener_ios.mm
@@ -0,0 +1,44 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_listener_ios.h"
+
+#import <Foundation/Foundation.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+// The iOS watchdog timer will kill an app that doesn't spin the main event
+// loop often enough. This uses a Gtest TestEventListener to spin the current
+// loop after each test finishes. However, if any individual test takes too
+// long, it is still possible that the app will get killed.
+
+namespace {
+
+class IOSRunLoopListener : public testing::EmptyTestEventListener {
+ public:
+  virtual void OnTestEnd(const testing::TestInfo& test_info);
+};
+
+void IOSRunLoopListener::OnTestEnd(const testing::TestInfo& test_info) {
+  @autoreleasepool {
+    // At the end of the test, spin the default loop for a moment.
+ NSDate* stop_date = [NSDate dateWithTimeIntervalSinceNow:0.001]; + [[NSRunLoop currentRunLoop] runUntilDate:stop_date]; + } +} + +} // namespace + + +namespace base { +namespace test_listener_ios { + +void RegisterTestEndListener() { + testing::TestEventListeners& listeners = + testing::UnitTest::GetInstance()->listeners(); + listeners.Append(new IOSRunLoopListener); +} + +} // namespace test_listener_ios +} // namespace base diff --git a/chromium/base/test/test_message_loop.cc b/chromium/base/test/test_message_loop.cc new file mode 100644 index 00000000000..14f60b0ca02 --- /dev/null +++ b/chromium/base/test/test_message_loop.cc @@ -0,0 +1,50 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_message_loop.h" + +#include "base/compiler_specific.h" +#include "base/message_loop/message_pump_type.h" +#include "base/notreached.h" +#include "base/run_loop.h" +#include "base/test/task_environment.h" +#include "build/build_config.h" + +namespace base { + +namespace { + +test::SingleThreadTaskEnvironment::MainThreadType GetMainThreadType( + MessagePumpType type) { + switch (type) { + case MessagePumpType::DEFAULT: + return test::SingleThreadTaskEnvironment::MainThreadType::DEFAULT; + case MessagePumpType::IO: + return test::SingleThreadTaskEnvironment::MainThreadType::IO; + case MessagePumpType::UI: + return test::SingleThreadTaskEnvironment::MainThreadType::UI; + case MessagePumpType::CUSTOM: +#if defined(OS_ANDROID) + case MessagePumpType::JAVA: +#elif defined(OS_MACOSX) + case MessagePumpType::NS_RUNLOOP: +#elif defined(OS_WIN) + case MessagePumpType::UI_WITH_WM_QUIT_SUPPORT: +#endif + NOTREACHED(); + return test::SingleThreadTaskEnvironment::MainThreadType::DEFAULT; + } +} +} // namespace + +TestMessageLoop::TestMessageLoop() = default; + +TestMessageLoop::TestMessageLoop(MessagePumpType type) + : task_environment_(GetMainThreadType(type)) {} + +TestMessageLoop::~TestMessageLoop() { + RunLoop().RunUntilIdle(); +} + +} // namespace base diff --git a/chromium/base/test/test_message_loop.h b/chromium/base/test/test_message_loop.h new file mode 100644 index 00000000000..3be2ea5c7a2 --- /dev/null +++ b/chromium/base/test/test_message_loop.h @@ -0,0 +1,39 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_MESSAGE_LOOP_H_ +#define BASE_TEST_TEST_MESSAGE_LOOP_H_ + +#include "base/message_loop/message_pump_type.h" +#include "base/single_thread_task_runner.h" +#include "base/test/task_environment.h" + +namespace base { + +// TestMessageLoop is a convenience class for unittests that need to create a +// message loop without a real thread backing it. For most tests, +// it is sufficient to just instantiate TestMessageLoop as a member variable. +// +// TestMessageLoop will attempt to drain the underlying MessageLoop on +// destruction for clean teardown of tests. 
+// +// TODO(b/891670): Get rid of this and migrate users to +// SingleThreadTaskEnvironment +class TestMessageLoop { + public: + TestMessageLoop(); + explicit TestMessageLoop(MessagePumpType type); + ~TestMessageLoop(); + + scoped_refptr<SingleThreadTaskRunner> task_runner() { + return task_environment_.GetMainThreadTaskRunner(); + } + + private: + test::SingleThreadTaskEnvironment task_environment_; +}; + +} // namespace base + +#endif // BASE_TEST_TEST_MESSAGE_LOOP_H_ diff --git a/chromium/base/test/test_mock_time_task_runner.cc b/chromium/base/test/test_mock_time_task_runner.cc new file mode 100644 index 00000000000..63aacf948d5 --- /dev/null +++ b/chromium/base/test/test_mock_time_task_runner.cc @@ -0,0 +1,501 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_mock_time_task_runner.h" + +#include <utility> + +#include "base/check_op.h" +#include "base/containers/circular_deque.h" +#include "base/macros.h" +#include "base/memory/ptr_util.h" +#include "base/memory/ref_counted.h" +#include "base/threading/thread_task_runner_handle.h" + +namespace base { +namespace { + +// LegacyMockTickClock and LegacyMockClock are used by deprecated APIs of +// TestMockTimeTaskRunner. They will be removed after updating callers of +// GetMockClock() and GetMockTickClock() to GetMockClockPtr() and +// GetMockTickClockPtr(). +class LegacyMockTickClock : public TickClock { + public: + explicit LegacyMockTickClock( + scoped_refptr<const TestMockTimeTaskRunner> task_runner) + : task_runner_(std::move(task_runner)) {} + + // TickClock: + TimeTicks NowTicks() const override { return task_runner_->NowTicks(); } + + private: + scoped_refptr<const TestMockTimeTaskRunner> task_runner_; + + DISALLOW_COPY_AND_ASSIGN(LegacyMockTickClock); +}; + +class LegacyMockClock : public Clock { + public: + explicit LegacyMockClock( + scoped_refptr<const TestMockTimeTaskRunner> task_runner) + : task_runner_(std::move(task_runner)) {} + + // Clock: + Time Now() const override { return task_runner_->Now(); } + + private: + scoped_refptr<const TestMockTimeTaskRunner> task_runner_; + + DISALLOW_COPY_AND_ASSIGN(LegacyMockClock); +}; + +} // namespace + +// A SingleThreadTaskRunner which forwards everything to its |target_|. This +// serves two purposes: +// 1) If a ThreadTaskRunnerHandle owned by TestMockTimeTaskRunner were to be +// set to point to that TestMockTimeTaskRunner, a reference cycle would +// result. As |target_| here is a non-refcounting raw pointer, the cycle is +// broken. +// 2) Since SingleThreadTaskRunner is ref-counted, it's quite easy for it to +// accidentally get captured between tests in a singleton somewhere. +// Indirecting via NonOwningProxyTaskRunner permits TestMockTimeTaskRunner +// to be cleaned up (removing the RunLoop::Delegate in the kBoundToThread +// mode), and to also cleanly flag any actual attempts to use the leaked +// task runner. +class TestMockTimeTaskRunner::NonOwningProxyTaskRunner + : public SingleThreadTaskRunner { + public: + explicit NonOwningProxyTaskRunner(SingleThreadTaskRunner* target) + : target_(target) { + DCHECK(target_); + } + + // Detaches this NonOwningProxyTaskRunner instance from its |target_|. It is + // invalid to post tasks after this point but RunsTasksInCurrentSequence() + // will still pass on the original thread for convenience with legacy code. 
+ void Detach() { + AutoLock scoped_lock(lock_); + target_ = nullptr; + } + + // SingleThreadTaskRunner: + bool RunsTasksInCurrentSequence() const override { + AutoLock scoped_lock(lock_); + if (target_) + return target_->RunsTasksInCurrentSequence(); + return thread_checker_.CalledOnValidThread(); + } + + bool PostDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) override { + AutoLock scoped_lock(lock_); + if (target_) + return target_->PostDelayedTask(from_here, std::move(task), delay); + + // The associated TestMockTimeTaskRunner is dead, so fail this PostTask. + return false; + } + + bool PostNonNestableDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) override { + AutoLock scoped_lock(lock_); + if (target_) { + return target_->PostNonNestableDelayedTask(from_here, std::move(task), + delay); + } + + // The associated TestMockTimeTaskRunner is dead, so fail this PostTask. + return false; + } + + private: + friend class RefCountedThreadSafe<NonOwningProxyTaskRunner>; + ~NonOwningProxyTaskRunner() override = default; + + mutable Lock lock_; + SingleThreadTaskRunner* target_; // guarded by lock_ + + // Used to implement RunsTasksInCurrentSequence, without relying on |target_|. + ThreadCheckerImpl thread_checker_; + + DISALLOW_COPY_AND_ASSIGN(NonOwningProxyTaskRunner); +}; + +// TestMockTimeTaskRunner::TestOrderedPendingTask ----------------------------- + +// Subclass of TestPendingTask which has a strictly monotonically increasing ID +// for every task, so that tasks posted with the same 'time to run' can be run +// in the order of being posted. +struct TestMockTimeTaskRunner::TestOrderedPendingTask + : public base::TestPendingTask { + TestOrderedPendingTask(); + TestOrderedPendingTask(const Location& location, + OnceClosure task, + TimeTicks post_time, + TimeDelta delay, + size_t ordinal, + TestNestability nestability); + TestOrderedPendingTask(TestOrderedPendingTask&&); + ~TestOrderedPendingTask(); + + TestOrderedPendingTask& operator=(TestOrderedPendingTask&&); + + size_t ordinal; + + private: + DISALLOW_COPY_AND_ASSIGN(TestOrderedPendingTask); +}; + +TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask() + : ordinal(0) { +} + +TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask( + TestOrderedPendingTask&&) = default; + +TestMockTimeTaskRunner::TestOrderedPendingTask::TestOrderedPendingTask( + const Location& location, + OnceClosure task, + TimeTicks post_time, + TimeDelta delay, + size_t ordinal, + TestNestability nestability) + : base::TestPendingTask(location, + std::move(task), + post_time, + delay, + nestability), + ordinal(ordinal) {} + +TestMockTimeTaskRunner::TestOrderedPendingTask::~TestOrderedPendingTask() = + default; + +TestMockTimeTaskRunner::TestOrderedPendingTask& +TestMockTimeTaskRunner::TestOrderedPendingTask::operator=( + TestOrderedPendingTask&&) = default; + +// TestMockTimeTaskRunner ----------------------------------------------------- + +// TODO(gab): This should also set the SequenceToken for the current thread. +// Ref. TestMockTimeTaskRunner::RunsTasksInCurrentSequence(). 
+TestMockTimeTaskRunner::ScopedContext::ScopedContext( + scoped_refptr<TestMockTimeTaskRunner> scope) + : on_destroy_(ThreadTaskRunnerHandle::OverrideForTesting(scope)) { + scope->RunUntilIdle(); +} + +TestMockTimeTaskRunner::ScopedContext::~ScopedContext() = default; + +bool TestMockTimeTaskRunner::TemporalOrder::operator()( + const TestOrderedPendingTask& first_task, + const TestOrderedPendingTask& second_task) const { + if (first_task.GetTimeToRun() == second_task.GetTimeToRun()) + return first_task.ordinal > second_task.ordinal; + return first_task.GetTimeToRun() > second_task.GetTimeToRun(); +} + +TestMockTimeTaskRunner::TestMockTimeTaskRunner(Type type) + : TestMockTimeTaskRunner(Time::UnixEpoch(), TimeTicks(), type) {} + +TestMockTimeTaskRunner::TestMockTimeTaskRunner(Time start_time, + TimeTicks start_ticks, + Type type) + : now_(start_time), + now_ticks_(start_ticks), + tasks_lock_cv_(&tasks_lock_), + proxy_task_runner_(MakeRefCounted<NonOwningProxyTaskRunner>(this)), + mock_clock_(this) { + if (type == Type::kBoundToThread) { + RunLoop::RegisterDelegateForCurrentThread(this); + thread_task_runner_handle_ = + std::make_unique<ThreadTaskRunnerHandle>(proxy_task_runner_); + } +} + +TestMockTimeTaskRunner::~TestMockTimeTaskRunner() { + proxy_task_runner_->Detach(); +} + +void TestMockTimeTaskRunner::FastForwardBy(TimeDelta delta) { + DCHECK(thread_checker_.CalledOnValidThread()); + DCHECK_GE(delta, TimeDelta()); + + const TimeTicks original_now_ticks = NowTicks(); + ProcessAllTasksNoLaterThan(delta); + ForwardClocksUntilTickTime(original_now_ticks + delta); +} + +void TestMockTimeTaskRunner::AdvanceMockTickClock(TimeDelta delta) { + ForwardClocksUntilTickTime(NowTicks() + delta); +} + +void TestMockTimeTaskRunner::RunUntilIdle() { + DCHECK(thread_checker_.CalledOnValidThread()); + ProcessAllTasksNoLaterThan(TimeDelta()); +} + +void TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() { + DCHECK(thread_checker_.CalledOnValidThread()); + ProcessAllTasksNoLaterThan(TimeDelta::Max()); +} + +void TestMockTimeTaskRunner::ClearPendingTasks() { + AutoLock scoped_lock(tasks_lock_); + // This is repeated in case task destruction triggers further tasks. + while (!tasks_.empty()) { + TaskPriorityQueue cleanup_tasks; + tasks_.swap(cleanup_tasks); + + // Destroy task objects with |tasks_lock_| released. Task deletion can cause + // calls to NonOwningProxyTaskRunner::RunsTasksInCurrentSequence() + // (e.g. for DCHECKs), which causes |NonOwningProxyTaskRunner::lock_| to be + // grabbed. + // + // On the other hand, calls from NonOwningProxyTaskRunner::PostTask -> + // TestMockTimeTaskRunner::PostTask acquire locks as + // |NonOwningProxyTaskRunner::lock_| followed by |tasks_lock_|, so it's + // desirable to avoid the reverse order, for deadlock freedom. 
+ AutoUnlock scoped_unlock(tasks_lock_); + while (!cleanup_tasks.empty()) + cleanup_tasks.pop(); + } +} + +Time TestMockTimeTaskRunner::Now() const { + AutoLock scoped_lock(tasks_lock_); + return now_; +} + +TimeTicks TestMockTimeTaskRunner::NowTicks() const { + AutoLock scoped_lock(tasks_lock_); + return now_ticks_; +} + +std::unique_ptr<Clock> TestMockTimeTaskRunner::DeprecatedGetMockClock() const { + DCHECK(thread_checker_.CalledOnValidThread()); + return std::make_unique<LegacyMockClock>(this); +} + +Clock* TestMockTimeTaskRunner::GetMockClock() const { + DCHECK(thread_checker_.CalledOnValidThread()); + return &mock_clock_; +} + +std::unique_ptr<TickClock> TestMockTimeTaskRunner::DeprecatedGetMockTickClock() + const { + DCHECK(thread_checker_.CalledOnValidThread()); + return std::make_unique<LegacyMockTickClock>(this); +} + +const TickClock* TestMockTimeTaskRunner::GetMockTickClock() const { + DCHECK(thread_checker_.CalledOnValidThread()); + return &mock_clock_; +} + +base::circular_deque<TestPendingTask> +TestMockTimeTaskRunner::TakePendingTasks() { + AutoLock scoped_lock(tasks_lock_); + base::circular_deque<TestPendingTask> tasks; + while (!tasks_.empty()) { + // It's safe to remove const and consume |task| here, since |task| is not + // used for ordering the item. + if (!tasks_.top().task.IsCancelled()) { + tasks.push_back( + std::move(const_cast<TestOrderedPendingTask&>(tasks_.top()))); + } + tasks_.pop(); + } + return tasks; +} + +bool TestMockTimeTaskRunner::HasPendingTask() { + DCHECK(thread_checker_.CalledOnValidThread()); + AutoLock scoped_lock(tasks_lock_); + while (!tasks_.empty() && tasks_.top().task.IsCancelled()) + tasks_.pop(); + return !tasks_.empty(); +} + +size_t TestMockTimeTaskRunner::GetPendingTaskCount() { + DCHECK(thread_checker_.CalledOnValidThread()); + AutoLock scoped_lock(tasks_lock_); + TaskPriorityQueue preserved_tasks; + while (!tasks_.empty()) { + if (!tasks_.top().task.IsCancelled()) { + preserved_tasks.push( + std::move(const_cast<TestOrderedPendingTask&>(tasks_.top()))); + } + tasks_.pop(); + } + tasks_.swap(preserved_tasks); + return tasks_.size(); +} + +TimeDelta TestMockTimeTaskRunner::NextPendingTaskDelay() { + DCHECK(thread_checker_.CalledOnValidThread()); + AutoLock scoped_lock(tasks_lock_); + while (!tasks_.empty() && tasks_.top().task.IsCancelled()) + tasks_.pop(); + return tasks_.empty() ? TimeDelta::Max() + : tasks_.top().GetTimeToRun() - now_ticks_; +} + +// TODO(gab): Combine |thread_checker_| with a SequenceToken to differentiate +// between tasks running in the scope of this TestMockTimeTaskRunner and other +// task runners sharing this thread. http://crbug.com/631186 +bool TestMockTimeTaskRunner::RunsTasksInCurrentSequence() const { + return thread_checker_.CalledOnValidThread(); +} + +bool TestMockTimeTaskRunner::PostDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) { + AutoLock scoped_lock(tasks_lock_); + tasks_.push(TestOrderedPendingTask(from_here, std::move(task), now_ticks_, + delay, next_task_ordinal_++, + TestPendingTask::NESTABLE)); + tasks_lock_cv_.Signal(); + return true; +} + +bool TestMockTimeTaskRunner::PostNonNestableDelayedTask( + const Location& from_here, + OnceClosure task, + TimeDelta delay) { + return PostDelayedTask(from_here, std::move(task), delay); +} + +void TestMockTimeTaskRunner::OnBeforeSelectingTask() { + // Empty default implementation. +} + +void TestMockTimeTaskRunner::OnAfterTimePassed() { + // Empty default implementation. 
+} + +void TestMockTimeTaskRunner::OnAfterTaskRun() { + // Empty default implementation. +} + +void TestMockTimeTaskRunner::ProcessAllTasksNoLaterThan(TimeDelta max_delta) { + DCHECK(thread_checker_.CalledOnValidThread()); + DCHECK_GE(max_delta, TimeDelta()); + + // Multiple test task runners can share the same thread for determinism in + // unit tests. Make sure this TestMockTimeTaskRunner's tasks run in its scope. + ScopedClosureRunner undo_override; + if (!ThreadTaskRunnerHandle::IsSet() || + ThreadTaskRunnerHandle::Get() != proxy_task_runner_.get()) { + undo_override = + ThreadTaskRunnerHandle::OverrideForTesting(proxy_task_runner_.get()); + } + + const TimeTicks original_now_ticks = NowTicks(); + while (!quit_run_loop_) { + OnBeforeSelectingTask(); + TestPendingTask task_info; + if (!DequeueNextTask(original_now_ticks, max_delta, &task_info)) + break; + if (task_info.task.IsCancelled()) + continue; + // If tasks were posted with a negative delay, task_info.GetTimeToRun() will + // be less than |now_ticks_|. ForwardClocksUntilTickTime() takes care of not + // moving the clock backwards in this case. + ForwardClocksUntilTickTime(task_info.GetTimeToRun()); + std::move(task_info.task).Run(); + OnAfterTaskRun(); + } +} + +void TestMockTimeTaskRunner::ForwardClocksUntilTickTime(TimeTicks later_ticks) { + DCHECK(thread_checker_.CalledOnValidThread()); + { + AutoLock scoped_lock(tasks_lock_); + if (later_ticks <= now_ticks_) + return; + + now_ += later_ticks - now_ticks_; + now_ticks_ = later_ticks; + } + OnAfterTimePassed(); +} + +bool TestMockTimeTaskRunner::DequeueNextTask(const TimeTicks& reference, + const TimeDelta& max_delta, + TestPendingTask* next_task) { + DCHECK(thread_checker_.CalledOnValidThread()); + AutoLock scoped_lock(tasks_lock_); + if (!tasks_.empty() && + (tasks_.top().GetTimeToRun() - reference) <= max_delta) { + // It's safe to remove const and consume |task| here, since |task| is not + // used for ordering the item. + *next_task = std::move(const_cast<TestOrderedPendingTask&>(tasks_.top())); + tasks_.pop(); + return true; + } + return false; +} + +void TestMockTimeTaskRunner::Run(bool application_tasks_allowed, + TimeDelta timeout) { + DCHECK(thread_checker_.CalledOnValidThread()); + + // Since TestMockTimeTaskRunner doesn't process system messages: there's no + // hope for anything but an application task to call Quit(). If this RunLoop + // can't process application tasks (i.e. disallowed by default in nested + // RunLoops) it's guaranteed to hang... + DCHECK(application_tasks_allowed) + << "This is a nested RunLoop instance and needs to be of " + "Type::kNestableTasksAllowed."; + + // This computation relies on saturated arithmetic. + TimeTicks run_until = now_ticks_ + timeout; + while (!quit_run_loop_ && now_ticks_ < run_until) { + RunUntilIdle(); + if (quit_run_loop_ || ShouldQuitWhenIdle()) + break; + + // Peek into |tasks_| to perform one of two things: + // A) If there are no remaining tasks, wait until one is posted and + // restart from the top. + // B) If there is a remaining delayed task. Fast-forward to reach the next + // round of tasks. 
+ TimeDelta auto_fast_forward_by; + { + AutoLock scoped_lock(tasks_lock_); + if (tasks_.empty()) { + while (tasks_.empty()) + tasks_lock_cv_.Wait(); + continue; + } + auto_fast_forward_by = + std::min(run_until, tasks_.top().GetTimeToRun()) - now_ticks_; + } + FastForwardBy(auto_fast_forward_by); + } + quit_run_loop_ = false; +} + +void TestMockTimeTaskRunner::Quit() { + DCHECK(thread_checker_.CalledOnValidThread()); + quit_run_loop_ = true; +} + +void TestMockTimeTaskRunner::EnsureWorkScheduled() { + // Nothing to do: TestMockTimeTaskRunner::Run() will always process tasks and + // doesn't need an extra kick on nested runs. +} + +TimeTicks TestMockTimeTaskRunner::MockClock::NowTicks() const { + return task_runner_->NowTicks(); +} + +Time TestMockTimeTaskRunner::MockClock::Now() const { + return task_runner_->Now(); +} + +} // namespace base diff --git a/chromium/base/test/test_mock_time_task_runner_unittest.cc b/chromium/base/test/test_mock_time_task_runner_unittest.cc new file mode 100644 index 00000000000..80805298ada --- /dev/null +++ b/chromium/base/test/test_mock_time_task_runner_unittest.cc @@ -0,0 +1,298 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_mock_time_task_runner.h" + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/cancelable_callback.h" +#include "base/memory/ref_counted.h" +#include "base/run_loop.h" +#include "base/test/bind_test_util.h" +#include "base/test/gtest_util.h" +#include "base/test/test_timeouts.h" +#include "base/threading/sequenced_task_runner_handle.h" +#include "base/threading/thread.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/time/time.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +// Basic usage should work the same from default and bound +// TestMockTimeTaskRunners. 
+TEST(TestMockTimeTaskRunnerTest, Basic) { + static constexpr TestMockTimeTaskRunner::Type kTestCases[] = { + TestMockTimeTaskRunner::Type::kStandalone, + TestMockTimeTaskRunner::Type::kBoundToThread}; + + for (auto type : kTestCases) { + SCOPED_TRACE(static_cast<int>(type)); + + auto mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(type); + int counter = 0; + + mock_time_task_runner->PostTask( + FROM_HERE, base::BindOnce([](int* counter) { *counter += 1; }, + Unretained(&counter))); + mock_time_task_runner->PostTask( + FROM_HERE, base::BindOnce([](int* counter) { *counter += 32; }, + Unretained(&counter))); + mock_time_task_runner->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 256; }, + Unretained(&counter)), + TimeDelta::FromSeconds(3)); + mock_time_task_runner->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 64; }, + Unretained(&counter)), + TimeDelta::FromSeconds(1)); + mock_time_task_runner->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 1024; }, + Unretained(&counter)), + TimeDelta::FromMinutes(20)); + mock_time_task_runner->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 4096; }, + Unretained(&counter)), + TimeDelta::FromDays(20)); + + int expected_value = 0; + EXPECT_EQ(expected_value, counter); + mock_time_task_runner->RunUntilIdle(); + expected_value += 1; + expected_value += 32; + EXPECT_EQ(expected_value, counter); + + mock_time_task_runner->RunUntilIdle(); + EXPECT_EQ(expected_value, counter); + + mock_time_task_runner->FastForwardBy(TimeDelta::FromSeconds(1)); + expected_value += 64; + EXPECT_EQ(expected_value, counter); + + mock_time_task_runner->FastForwardBy(TimeDelta::FromSeconds(5)); + expected_value += 256; + EXPECT_EQ(expected_value, counter); + + mock_time_task_runner->FastForwardUntilNoTasksRemain(); + expected_value += 1024; + expected_value += 4096; + EXPECT_EQ(expected_value, counter); + } +} + +// A default TestMockTimeTaskRunner shouldn't result in a thread association. 
+TEST(TestMockTimeTaskRunnerTest, DefaultUnbound) { + auto unbound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>(); + EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet()); + EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet()); + EXPECT_DEATH_IF_SUPPORTED({ RunLoop().RunUntilIdle(); }, ""); +} + +TEST(TestMockTimeTaskRunnerTest, RunLoopDriveableWhenBound) { + auto bound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>( + TestMockTimeTaskRunner::Type::kBoundToThread); + + int counter = 0; + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce([](int* counter) { *counter += 1; }, + Unretained(&counter))); + ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::BindOnce([](int* counter) { *counter += 32; }, + Unretained(&counter))); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 256; }, + Unretained(&counter)), + TimeDelta::FromSeconds(3)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 64; }, + Unretained(&counter)), + TimeDelta::FromSeconds(1)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 1024; }, + Unretained(&counter)), + TimeDelta::FromMinutes(20)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 4096; }, + Unretained(&counter)), + TimeDelta::FromDays(20)); + + int expected_value = 0; + EXPECT_EQ(expected_value, counter); + RunLoop().RunUntilIdle(); + expected_value += 1; + expected_value += 32; + EXPECT_EQ(expected_value, counter); + + RunLoop().RunUntilIdle(); + EXPECT_EQ(expected_value, counter); + + { + RunLoop run_loop; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, run_loop.QuitClosure(), TimeDelta::FromSeconds(1)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 8192; }, + Unretained(&counter)), + TimeDelta::FromSeconds(1)); + + // The QuitClosure() should be ordered between the 64 and the 8192 + // increments and should preempt the latter. + run_loop.Run(); + expected_value += 64; + EXPECT_EQ(expected_value, counter); + + // Running until idle should process the 8192 increment whose delay has + // expired in the previous Run(). + RunLoop().RunUntilIdle(); + expected_value += 8192; + EXPECT_EQ(expected_value, counter); + } + + { + RunLoop run_loop; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromSeconds(5)); + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, + base::BindOnce([](int* counter) { *counter += 16384; }, + Unretained(&counter)), + TimeDelta::FromSeconds(5)); + + // The QuitWhenIdleClosure() shouldn't preempt equally delayed tasks and as + // such the 16384 increment should be processed before quitting. + run_loop.Run(); + expected_value += 256; + expected_value += 16384; + EXPECT_EQ(expected_value, counter); + } + + // Process the remaining tasks (note: do not mimic this elsewhere, + // TestMockTimeTaskRunner::FastForwardUntilNoTasksRemain() is a better API to + // do this, this is just done here for the purpose of extensively testing the + // RunLoop approach). 
+ RunLoop run_loop; + ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, run_loop.QuitWhenIdleClosure(), TimeDelta::FromDays(50)); + + run_loop.Run(); + expected_value += 1024; + expected_value += 4096; + EXPECT_EQ(expected_value, counter); +} + +TEST(TestMockTimeTaskRunnerTest, AvoidCaptureWhenBound) { + // Make sure that capturing the active task runner --- which sometimes happens + // unknowingly due to ThreadsafeObserverList deep within some singleton --- + // does not keep the entire TestMockTimeTaskRunner alive, as in bound mode + // that's a RunLoop::Delegate, and leaking that renders any further tests that + // need RunLoop support unrunnable. + // + // (This used to happen when code run from ProcessAllTasksNoLaterThan grabbed + // the runner.). + scoped_refptr<SingleThreadTaskRunner> captured; + { + auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>( + TestMockTimeTaskRunner::Type::kBoundToThread); + + task_runner->PostTask(FROM_HERE, base::BindLambdaForTesting([&]() { + captured = ThreadTaskRunnerHandle::Get(); + })); + task_runner->RunUntilIdle(); + } + + { + // This should not complain about RunLoop::Delegate already existing. + auto task_runner2 = MakeRefCounted<TestMockTimeTaskRunner>( + TestMockTimeTaskRunner::Type::kBoundToThread); + } +} + +// Regression test that receiving the quit-when-idle signal when already empty +// works as intended (i.e. that |TestMockTimeTaskRunner::tasks_lock_cv| is +// properly signaled). +TEST(TestMockTimeTaskRunnerTest, RunLoopQuitFromIdle) { + auto bound_mock_time_task_runner = MakeRefCounted<TestMockTimeTaskRunner>( + TestMockTimeTaskRunner::Type::kBoundToThread); + + Thread quitting_thread("quitting thread"); + quitting_thread.Start(); + + RunLoop run_loop; + quitting_thread.task_runner()->PostDelayedTask( + FROM_HERE, run_loop.QuitWhenIdleClosure(), TestTimeouts::tiny_timeout()); + run_loop.Run(); +} + +TEST(TestMockTimeTaskRunnerTest, TakePendingTasks) { + auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>(); + task_runner->PostTask(FROM_HERE, DoNothing()); + EXPECT_TRUE(task_runner->HasPendingTask()); + EXPECT_EQ(1u, task_runner->TakePendingTasks().size()); + EXPECT_FALSE(task_runner->HasPendingTask()); +} + +TEST(TestMockTimeTaskRunnerTest, CancelPendingTask) { + auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>(); + CancelableOnceClosure task1(DoNothing::Once()); + task_runner->PostDelayedTask(FROM_HERE, task1.callback(), + TimeDelta::FromSeconds(1)); + EXPECT_TRUE(task_runner->HasPendingTask()); + EXPECT_EQ(1u, task_runner->GetPendingTaskCount()); + EXPECT_EQ(TimeDelta::FromSeconds(1), task_runner->NextPendingTaskDelay()); + task1.Cancel(); + EXPECT_FALSE(task_runner->HasPendingTask()); + + CancelableOnceClosure task2(DoNothing::Once()); + task_runner->PostDelayedTask(FROM_HERE, task2.callback(), + TimeDelta::FromSeconds(1)); + task2.Cancel(); + EXPECT_EQ(0u, task_runner->GetPendingTaskCount()); + + CancelableOnceClosure task3(DoNothing::Once()); + task_runner->PostDelayedTask(FROM_HERE, task3.callback(), + TimeDelta::FromSeconds(1)); + task3.Cancel(); + EXPECT_EQ(TimeDelta::Max(), task_runner->NextPendingTaskDelay()); + + CancelableOnceClosure task4(DoNothing::Once()); + task_runner->PostDelayedTask(FROM_HERE, task4.callback(), + TimeDelta::FromSeconds(1)); + task4.Cancel(); + EXPECT_TRUE(task_runner->TakePendingTasks().empty()); +} + +TEST(TestMockTimeTaskRunnerTest, NoFastForwardToCancelledTask) { + auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>(); + TimeTicks start_time = 
task_runner->NowTicks(); + CancelableOnceClosure task(DoNothing::Once()); + task_runner->PostDelayedTask(FROM_HERE, task.callback(), + TimeDelta::FromSeconds(1)); + EXPECT_EQ(TimeDelta::FromSeconds(1), task_runner->NextPendingTaskDelay()); + task.Cancel(); + task_runner->FastForwardUntilNoTasksRemain(); + EXPECT_EQ(start_time, task_runner->NowTicks()); +} + +TEST(TestMockTimeTaskRunnerTest, AdvanceMockTickClockDoesNotRunTasks) { + auto task_runner = MakeRefCounted<TestMockTimeTaskRunner>(); + TimeTicks start_time = task_runner->NowTicks(); + task_runner->PostTask(FROM_HERE, BindOnce([]() { ADD_FAILURE(); })); + task_runner->PostDelayedTask(FROM_HERE, BindOnce([]() { ADD_FAILURE(); }), + TimeDelta::FromSeconds(1)); + + task_runner->AdvanceMockTickClock(TimeDelta::FromSeconds(3)); + EXPECT_EQ(start_time + TimeDelta::FromSeconds(3), task_runner->NowTicks()); + EXPECT_EQ(2u, task_runner->GetPendingTaskCount()); +} + +} // namespace base diff --git a/chromium/base/test/test_reg_util_win.cc b/chromium/base/test/test_reg_util_win.cc new file mode 100644 index 00000000000..6bcfe60bfeb --- /dev/null +++ b/chromium/base/test/test_reg_util_win.cc @@ -0,0 +1,120 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_reg_util_win.h" + +#include <stdint.h> + +#include "base/guid.h" +#include "base/memory/ptr_util.h" +#include "base/strings/strcat.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/string_split.h" +#include "base/strings/string_util.h" +#include "base/strings/utf_string_conversions.h" +#include "testing/gtest/include/gtest/gtest.h" + +#include <windows.h> + +namespace registry_util { + +namespace { + +constexpr base::char16 kTimestampDelimiter[] = STRING16_LITERAL("$"); +constexpr wchar_t kTempTestKeyPath[] = L"Software\\Chromium\\TempTestKeys"; + +void DeleteStaleTestKeys(const base::Time& now, + const std::wstring& test_key_root) { + base::win::RegKey test_root_key; + if (test_root_key.Open(HKEY_CURRENT_USER, + test_key_root.c_str(), + KEY_ALL_ACCESS) != ERROR_SUCCESS) { + // This will occur on first-run, but is harmless. 
+ return; + } + + base::win::RegistryKeyIterator iterator_test_root_key(HKEY_CURRENT_USER, + test_key_root.c_str()); + for (; iterator_test_root_key.Valid(); ++iterator_test_root_key) { + std::wstring key_name = iterator_test_root_key.Name(); + std::vector<base::StringPiece16> tokens = base::SplitStringPiece( + base::AsStringPiece16(key_name), kTimestampDelimiter, + base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); + if (tokens.empty()) + continue; + int64_t key_name_as_number = 0; + + if (!base::StringToInt64(tokens[0], &key_name_as_number)) { + test_root_key.DeleteKey(key_name.c_str()); + continue; + } + + base::Time key_time = base::Time::FromInternalValue(key_name_as_number); + base::TimeDelta age = now - key_time; + + if (age > base::TimeDelta::FromHours(24)) + test_root_key.DeleteKey(key_name.c_str()); + } +} + +std::wstring GenerateTempKeyPath(const std::wstring& test_key_root, + const base::Time& timestamp) { + return base::AsWString(base::StrCat( + {base::AsStringPiece16(test_key_root), STRING16_LITERAL("\\"), + base::NumberToString16(timestamp.ToInternalValue()), kTimestampDelimiter, + base::ASCIIToUTF16(base::GenerateGUID())})); +} + +} // namespace + +RegistryOverrideManager::ScopedRegistryKeyOverride::ScopedRegistryKeyOverride( + HKEY override, + const std::wstring& key_path) + : override_(override), key_path_(key_path) {} + +RegistryOverrideManager:: + ScopedRegistryKeyOverride::~ScopedRegistryKeyOverride() { + ::RegOverridePredefKey(override_, NULL); + base::win::RegKey(HKEY_CURRENT_USER, L"", KEY_QUERY_VALUE) + .DeleteKey(key_path_.c_str()); +} + +RegistryOverrideManager::RegistryOverrideManager() + : timestamp_(base::Time::Now()), test_key_root_(kTempTestKeyPath) { + DeleteStaleTestKeys(timestamp_, test_key_root_); +} + +RegistryOverrideManager::RegistryOverrideManager( + const base::Time& timestamp, + const std::wstring& test_key_root) + : timestamp_(timestamp), test_key_root_(test_key_root) { + DeleteStaleTestKeys(timestamp_, test_key_root_); +} + +RegistryOverrideManager::~RegistryOverrideManager() {} + +void RegistryOverrideManager::OverrideRegistry(HKEY override) { + OverrideRegistry(override, nullptr); +} + +void RegistryOverrideManager::OverrideRegistry(HKEY override, + std::wstring* override_path) { + std::wstring key_path = GenerateTempKeyPath(test_key_root_, timestamp_); + + base::win::RegKey temp_key; + ASSERT_EQ(ERROR_SUCCESS, temp_key.Create(HKEY_CURRENT_USER, key_path.c_str(), + KEY_ALL_ACCESS)); + ASSERT_EQ(ERROR_SUCCESS, ::RegOverridePredefKey(override, temp_key.Handle())); + + overrides_.push_back( + std::make_unique<ScopedRegistryKeyOverride>(override, key_path)); + if (override_path) + override_path->assign(key_path); +} + +std::wstring GenerateTempKeyPath() { + return GenerateTempKeyPath(kTempTestKeyPath, base::Time::Now()); +} + +} // namespace registry_util diff --git a/chromium/base/test/test_reg_util_win.h b/chromium/base/test/test_reg_util_win.h new file mode 100644 index 00000000000..0592ff7176c --- /dev/null +++ b/chromium/base/test/test_reg_util_win.h @@ -0,0 +1,82 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_REG_UTIL_WIN_H_ +#define BASE_TEST_TEST_REG_UTIL_WIN_H_ + +// Registry utility functions used only by tests. 
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/win/registry.h"
+
+namespace registry_util {
+
+// Allows a test to easily override registry hives so that it can start from a
+// known good state, or make sure to not leave any side effects once the test
+// completes. This supports parallel tests. All the overrides are scoped to the
+// lifetime of the override manager. Destroy the manager to undo the overrides.
+//
+// Overridden hives use keys stored at, for instance:
+//   HKCU\Software\Chromium\TempTestKeys\
+//   13028145911617809$02AB211C-CF73-478D-8D91-618E11998AED
+// The key path is composed of:
+//   - The test key root, HKCU\Software\Chromium\TempTestKeys\
+//   - The base::Time::ToInternalValue of the creation time. This is used to
+//     delete stale keys left over from crashed tests.
+//   - A GUID used for preventing name collisions (although unlikely) between
+//     two RegistryOverrideManagers created with the same timestamp.
+class RegistryOverrideManager {
+ public:
+  RegistryOverrideManager();
+  ~RegistryOverrideManager();
+
+  // Override the given registry hive using a randomly generated temporary key.
+  // Multiple overrides to the same hive are not supported and lead to undefined
+  // behavior.
+  // The second overload optionally returns the registry override path via
+  // |override_path|.
+  // Calls to these functions must be wrapped in ASSERT_NO_FATAL_FAILURE to
+  // ensure that tests do not proceed in case of failure to override.
+  void OverrideRegistry(HKEY override);
+  void OverrideRegistry(HKEY override, std::wstring* override_path);
+
+ private:
+  friend class RegistryOverrideManagerTest;
+
+  // Keeps track of one override.
+  class ScopedRegistryKeyOverride {
+   public:
+    ScopedRegistryKeyOverride(HKEY override, const std::wstring& key_path);
+    ~ScopedRegistryKeyOverride();
+
+   private:
+    HKEY override_;
+    std::wstring key_path_;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedRegistryKeyOverride);
+  };
+
+  // Used for testing only.
+  RegistryOverrideManager(const base::Time& timestamp,
+                          const std::wstring& test_key_root);
+
+  base::Time timestamp_;
+  std::wstring guid_;
+
+  std::wstring test_key_root_;
+  std::vector<std::unique_ptr<ScopedRegistryKeyOverride>> overrides_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegistryOverrideManager);
+};
+
+// Generates a temporary key path that will eventually be deleted
+// automatically if the process crashes.
+std::wstring GenerateTempKeyPath();
+
+} // namespace registry_util
+
+#endif // BASE_TEST_TEST_REG_UTIL_WIN_H_
diff --git a/chromium/base/test/test_reg_util_win_unittest.cc b/chromium/base/test/test_reg_util_win_unittest.cc
new file mode 100644
index 00000000000..12c1d6b9970
--- /dev/null
+++ b/chromium/base/test/test_reg_util_win_unittest.cc
@@ -0,0 +1,134 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
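The unit tests below exercise RegistryOverrideManager in detail. As a condensed sketch of the pattern described in the header above (the key path and value written here are illustrative placeholders):

#include "base/test/test_reg_util_win.h"
#include "base/win/registry.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(RegistryOverrideUsage, WritesLandInTemporaryHive) {
  registry_util::RegistryOverrideManager override_manager;
  // Per the header comment, wrap the override in ASSERT_NO_FATAL_FAILURE so
  // the test stops if HKCU could not be redirected.
  ASSERT_NO_FATAL_FAILURE(override_manager.OverrideRegistry(HKEY_CURRENT_USER));

  // This write goes to the temporary key, not the real HKCU hive, and is
  // discarded when |override_manager| goes out of scope.
  base::win::RegKey key;
  ASSERT_EQ(ERROR_SUCCESS, key.Create(HKEY_CURRENT_USER, L"Software\\Example",
                                      KEY_ALL_ACCESS));
  EXPECT_EQ(ERROR_SUCCESS, key.WriteValue(L"ExampleValue", 42));
}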
+ +#include "base/test/test_reg_util_win.h" + +#include <memory> + +#include "base/compiler_specific.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/string_util.h" +#include "base/strings/utf_string_conversions.h" +#include "base/time/time.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace registry_util { + +namespace { +const wchar_t kTestKeyPath[] = L"Software\\Chromium\\Foo\\Baz\\TestKey"; +const wchar_t kTestValueName[] = L"TestValue"; +} // namespace + +class RegistryOverrideManagerTest : public testing::Test { + protected: + RegistryOverrideManagerTest() { + // We assign a fake test key path to our test RegistryOverrideManager + // so we don't interfere with any actual RegistryOverrideManagers running + // on the system. This fake path will be auto-deleted by other + // RegistryOverrideManagers in case we crash. + fake_test_key_root_ = registry_util::GenerateTempKeyPath(); + + // Ensure a clean test environment. + base::win::RegKey key(HKEY_CURRENT_USER); + key.DeleteKey(fake_test_key_root_.c_str()); + key.DeleteKey(kTestKeyPath); + } + + ~RegistryOverrideManagerTest() override { + base::win::RegKey key(HKEY_CURRENT_USER); + key.DeleteKey(fake_test_key_root_.c_str()); + } + + void AssertKeyExists(const std::wstring& key_path) { + base::win::RegKey key; + ASSERT_EQ(ERROR_SUCCESS, + key.Open(HKEY_CURRENT_USER, key_path.c_str(), KEY_READ)) + << key_path << " does not exist."; + } + + void AssertKeyAbsent(const std::wstring& key_path) { + base::win::RegKey key; + ASSERT_NE(ERROR_SUCCESS, + key.Open(HKEY_CURRENT_USER, key_path.c_str(), KEY_READ)) + << key_path << " exists but it should not."; + } + + void CreateKey(const std::wstring& key_path) { + base::win::RegKey key; + ASSERT_EQ(ERROR_SUCCESS, + key.Create(HKEY_CURRENT_USER, key_path.c_str(), KEY_ALL_ACCESS)); + } + + std::wstring FakeOverrideManagerPath(const base::Time& time) { + return fake_test_key_root_ + L"\\" + + base::AsWString(base::NumberToString16(time.ToInternalValue())); + } + + void CreateManager(const base::Time& timestamp) { + manager_.reset(new RegistryOverrideManager(timestamp, fake_test_key_root_)); + manager_->OverrideRegistry(HKEY_CURRENT_USER); + } + + std::wstring fake_test_key_root_; + std::unique_ptr<RegistryOverrideManager> manager_; +}; + +TEST_F(RegistryOverrideManagerTest, Basic) { + ASSERT_NO_FATAL_FAILURE(CreateManager(base::Time::Now())); + + base::win::RegKey create_key; + EXPECT_EQ(ERROR_SUCCESS, + create_key.Create(HKEY_CURRENT_USER, kTestKeyPath, KEY_ALL_ACCESS)); + EXPECT_TRUE(create_key.Valid()); + EXPECT_EQ(ERROR_SUCCESS, create_key.WriteValue(kTestValueName, 42)); + create_key.Close(); + + ASSERT_NO_FATAL_FAILURE(AssertKeyExists(kTestKeyPath)); + + DWORD value; + base::win::RegKey read_key; + EXPECT_EQ(ERROR_SUCCESS, + read_key.Open(HKEY_CURRENT_USER, kTestKeyPath, KEY_READ)); + EXPECT_TRUE(read_key.Valid()); + EXPECT_EQ(ERROR_SUCCESS, read_key.ReadValueDW(kTestValueName, &value)); + EXPECT_EQ(42u, value); + read_key.Close(); + + manager_.reset(); + + ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(kTestKeyPath)); +} + +TEST_F(RegistryOverrideManagerTest, DeleteStaleKeys) { + base::Time::Exploded kTestTimeExploded = {2013, 11, 1, 4, 0, 0, 0, 0}; + base::Time kTestTime; + EXPECT_TRUE(base::Time::FromUTCExploded(kTestTimeExploded, &kTestTime)); + + std::wstring path_garbage = fake_test_key_root_ + L"\\Blah"; + std::wstring path_very_stale = + FakeOverrideManagerPath(kTestTime - base::TimeDelta::FromDays(100)); + std::wstring path_stale = + 
FakeOverrideManagerPath(kTestTime - base::TimeDelta::FromDays(5)); + std::wstring path_current = + FakeOverrideManagerPath(kTestTime - base::TimeDelta::FromMinutes(1)); + std::wstring path_future = + FakeOverrideManagerPath(kTestTime + base::TimeDelta::FromMinutes(1)); + + ASSERT_NO_FATAL_FAILURE(CreateKey(path_garbage)); + ASSERT_NO_FATAL_FAILURE(CreateKey(path_very_stale)); + ASSERT_NO_FATAL_FAILURE(CreateKey(path_stale)); + ASSERT_NO_FATAL_FAILURE(CreateKey(path_current)); + ASSERT_NO_FATAL_FAILURE(CreateKey(path_future)); + + ASSERT_NO_FATAL_FAILURE(CreateManager(kTestTime)); + manager_.reset(); + + ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(path_garbage)); + ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(path_very_stale)); + ASSERT_NO_FATAL_FAILURE(AssertKeyAbsent(path_stale)); + ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_current)); + ASSERT_NO_FATAL_FAILURE(AssertKeyExists(path_future)); +} + +} // namespace registry_util diff --git a/chromium/base/test/test_shared_library.cc b/chromium/base/test/test_shared_library.cc new file mode 100644 index 00000000000..99c04674cea --- /dev/null +++ b/chromium/base/test/test_shared_library.cc @@ -0,0 +1,30 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/native_library_test_utils.h" + +extern "C" { + +int NATIVE_LIBRARY_TEST_ALWAYS_EXPORT GetExportedValue() { + return g_native_library_exported_value; +} + +void NATIVE_LIBRARY_TEST_ALWAYS_EXPORT SetExportedValue(int value) { + g_native_library_exported_value = value; +} + +// A test function used only to verify basic dynamic symbol resolution. +int NATIVE_LIBRARY_TEST_ALWAYS_EXPORT GetSimpleTestValue() { + return 5; +} + +// When called by |NativeLibraryTest.LoadLibraryPreferOwnSymbols|, this should +// forward to the local definition of NativeLibraryTestIncrement(), even though +// the test module also links in the native_library_test_utils source library +// which exports it. +int NATIVE_LIBRARY_TEST_ALWAYS_EXPORT GetIncrementValue() { + return NativeLibraryTestIncrement(); +} + +} // extern "C" diff --git a/chromium/base/test/test_shared_memory_util.cc b/chromium/base/test/test_shared_memory_util.cc new file mode 100644 index 00000000000..43bbb7dbc2e --- /dev/null +++ b/chromium/base/test/test_shared_memory_util.cc @@ -0,0 +1,169 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_shared_memory_util.h" + +#include <gtest/gtest.h> + +#include <stddef.h> +#include <stdint.h> + +#include "base/logging.h" +#include "build/build_config.h" + +#if defined(OS_POSIX) && !defined(OS_NACL) +#include <errno.h> +#include <string.h> +#include <sys/mman.h> +#include <unistd.h> +#endif + +#if defined(OS_FUCHSIA) +#include <lib/zx/vmar.h> +#include <zircon/rights.h> +#endif + +#if defined(OS_MACOSX) && !defined(OS_IOS) +#include <mach/mach_vm.h> +#endif + +#if defined(OS_WIN) +#include <aclapi.h> +#endif + +namespace base { + +#if !defined(OS_NACL) + +static const size_t kDataSize = 1024; + +// Common routine used with Posix file descriptors. Check that shared memory +// file descriptor |fd| does not allow writable mappings. Return true on +// success, false otherwise. 
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS)) +static bool CheckReadOnlySharedMemoryFdPosix(int fd) { +// Note that the error on Android is EPERM, unlike other platforms where +// it will be EACCES. +#if defined(OS_ANDROID) + const int kExpectedErrno = EPERM; +#else + const int kExpectedErrno = EACCES; +#endif + errno = 0; + void* address = + mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + const bool success = (address != nullptr) && (address != MAP_FAILED); + if (success) { + LOG(ERROR) << "mmap() should have failed!"; + munmap(address, kDataSize); // Cleanup. + return false; + } + if (errno != kExpectedErrno) { + LOG(ERROR) << "Expected mmap() to return " << kExpectedErrno + << " but returned " << errno << ": " << strerror(errno) << "\n"; + return false; + } + return true; +} +#endif // OS_POSIX && !(defined(OS_MACOSX) && !defined(OS_IOS)) + +#if defined(OS_FUCHSIA) +// Fuchsia specific implementation. +bool CheckReadOnlySharedMemoryFuchsiaHandle(zx::unowned_vmo handle) { + const uint32_t flags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE; + uintptr_t addr; + const zx_status_t status = + zx::vmar::root_self()->map(0, *handle, 0U, kDataSize, flags, &addr); + if (status == ZX_OK) { + LOG(ERROR) << "zx_vmar_map() should have failed!"; + zx::vmar::root_self()->unmap(addr, kDataSize); + return false; + } + if (status != ZX_ERR_ACCESS_DENIED) { + LOG(ERROR) << "Expected zx_vmar_map() to return " << ZX_ERR_ACCESS_DENIED + << " (ZX_ERR_ACCESS_DENIED) but returned " << status << "\n"; + return false; + } + return true; +} + +#elif defined(OS_MACOSX) && !defined(OS_IOS) +bool CheckReadOnlySharedMemoryMachPort(mach_port_t memory_object) { + mach_vm_address_t memory; + const kern_return_t kr = mach_vm_map( + mach_task_self(), &memory, kDataSize, 0, VM_FLAGS_ANYWHERE, memory_object, + 0, FALSE, VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, VM_INHERIT_NONE); + if (kr == KERN_SUCCESS) { + LOG(ERROR) << "mach_vm_map() should have failed!"; + mach_vm_deallocate(mach_task_self(), memory, kDataSize); // Cleanup. 
+ return false; + } + return true; +} + +#elif defined(OS_WIN) +bool CheckReadOnlySharedMemoryWindowsHandle(HANDLE handle) { + void* memory = + MapViewOfFile(handle, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kDataSize); + if (memory != nullptr) { + LOG(ERROR) << "MapViewOfFile() should have failed!"; + UnmapViewOfFile(memory); + return false; + } + return true; +} +#endif + +bool CheckReadOnlyPlatformSharedMemoryRegionForTesting( + subtle::PlatformSharedMemoryRegion region) { + if (region.GetMode() != subtle::PlatformSharedMemoryRegion::Mode::kReadOnly) { + LOG(ERROR) << "Expected region mode is " + << static_cast<int>( + subtle::PlatformSharedMemoryRegion::Mode::kReadOnly) + << " but actual is " << static_cast<int>(region.GetMode()); + return false; + } + +#if defined(OS_MACOSX) && !defined(OS_IOS) + return CheckReadOnlySharedMemoryMachPort(region.GetPlatformHandle()); +#elif defined(OS_FUCHSIA) + return CheckReadOnlySharedMemoryFuchsiaHandle(region.GetPlatformHandle()); +#elif defined(OS_WIN) + return CheckReadOnlySharedMemoryWindowsHandle(region.GetPlatformHandle()); +#elif defined(OS_ANDROID) + return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle()); +#else + return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle().fd); +#endif +} + +#endif // !OS_NACL + +WritableSharedMemoryMapping MapForTesting( + subtle::PlatformSharedMemoryRegion* region) { + return MapAtForTesting(region, 0, region->GetSize()); +} + +WritableSharedMemoryMapping MapAtForTesting( + subtle::PlatformSharedMemoryRegion* region, + off_t offset, + size_t size) { + void* memory = nullptr; + size_t mapped_size = 0; + if (!region->MapAt(offset, size, &memory, &mapped_size)) + return {}; + + return WritableSharedMemoryMapping(memory, size, mapped_size, + region->GetGUID()); +} + +template <> +std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping> +CreateMappedRegion(size_t size) { + MappedReadOnlyRegion mapped_region = ReadOnlySharedMemoryRegion::Create(size); + return {std::move(mapped_region.region), std::move(mapped_region.mapping)}; +} + +} // namespace base diff --git a/chromium/base/test/test_shared_memory_util.h b/chromium/base/test/test_shared_memory_util.h new file mode 100644 index 00000000000..e23d83e1fee --- /dev/null +++ b/chromium/base/test/test_shared_memory_util.h @@ -0,0 +1,52 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_ +#define BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_ + +#include "base/memory/platform_shared_memory_region.h" +#include "base/memory/read_only_shared_memory_region.h" +#include "base/memory/shared_memory_mapping.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +// Check that the shared memory |region| cannot be used to perform a writable +// mapping with low-level system APIs like mmap(). Return true in case of +// success (i.e. writable mappings are _not_ allowed), or false otherwise. +bool CheckReadOnlyPlatformSharedMemoryRegionForTesting( + subtle::PlatformSharedMemoryRegion region); + +// Creates a scoped mapping from a PlatformSharedMemoryRegion. It's useful for +// PlatformSharedMemoryRegion testing to not leak mapped memory. +// WritableSharedMemoryMapping is used for wrapping because it has max +// capabilities but the actual permission depends on the |region|'s mode. 
+// This must not be used in production where PlatformSharedMemoryRegion should +// be wrapped with {Writable,Unsafe,ReadOnly}SharedMemoryRegion. +WritableSharedMemoryMapping MapAtForTesting( + subtle::PlatformSharedMemoryRegion* region, + off_t offset, + size_t size); + +WritableSharedMemoryMapping MapForTesting( + subtle::PlatformSharedMemoryRegion* region); + +template <typename SharedMemoryRegionType> +std::pair<SharedMemoryRegionType, WritableSharedMemoryMapping> +CreateMappedRegion(size_t size) { + SharedMemoryRegionType region = SharedMemoryRegionType::Create(size); + WritableSharedMemoryMapping mapping = region.Map(); + return {std::move(region), std::move(mapping)}; +} + +// Template specialization of CreateMappedRegion<>() for +// the ReadOnlySharedMemoryRegion. We need this because +// ReadOnlySharedMemoryRegion::Create() has a different return type. +template <> +std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping> +CreateMappedRegion(size_t size); + +} // namespace base + +#endif // BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_ diff --git a/chromium/base/test/test_shortcut_win.cc b/chromium/base/test/test_shortcut_win.cc new file mode 100644 index 00000000000..b80fd967ba7 --- /dev/null +++ b/chromium/base/test/test_shortcut_win.cc @@ -0,0 +1,154 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_shortcut_win.h" + +#include <windows.h> +#include <objbase.h> +#include <shlobj.h> +#include <propkey.h> +#include <wrl/client.h> + +#include "base/files/file_path.h" +#include "base/strings/string16.h" +#include "base/strings/string_util.h" +#include "base/strings/utf_string_conversions.h" +#include "base/win/scoped_propvariant.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace win { + +void ValidatePathsAreEqual(const FilePath& expected_path, + const FilePath& actual_path) { + wchar_t long_expected_path_chars[MAX_PATH] = {0}; + wchar_t long_actual_path_chars[MAX_PATH] = {0}; + + // If |expected_path| is empty confirm immediately that |actual_path| is also + // empty. + if (expected_path.empty()) { + EXPECT_TRUE(actual_path.empty()); + return; + } + + // Proceed with LongPathName matching which will also confirm the paths exist. + EXPECT_NE(0U, ::GetLongPathName(expected_path.value().c_str(), + long_expected_path_chars, MAX_PATH)) + << "Failed to get LongPathName of " << expected_path.value(); + EXPECT_NE(0U, ::GetLongPathName(actual_path.value().c_str(), + long_actual_path_chars, MAX_PATH)) + << "Failed to get LongPathName of " << actual_path.value(); + + FilePath long_expected_path(long_expected_path_chars); + FilePath long_actual_path(long_actual_path_chars); + EXPECT_FALSE(long_expected_path.empty()); + EXPECT_FALSE(long_actual_path.empty()); + + EXPECT_EQ(long_expected_path, long_actual_path); +} + +void ValidateShortcut(const FilePath& shortcut_path, + const ShortcutProperties& properties) { + Microsoft::WRL::ComPtr<IShellLink> i_shell_link; + Microsoft::WRL::ComPtr<IPersistFile> i_persist_file; + + wchar_t read_target[MAX_PATH] = {0}; + wchar_t read_working_dir[MAX_PATH] = {0}; + wchar_t read_arguments[MAX_PATH] = {0}; + wchar_t read_description[MAX_PATH] = {0}; + wchar_t read_icon[MAX_PATH] = {0}; + int read_icon_index = 0; + + HRESULT hr; + + // Initialize the shell interfaces. 
+ EXPECT_TRUE(SUCCEEDED(hr = ::CoCreateInstance(CLSID_ShellLink, NULL, + CLSCTX_INPROC_SERVER, + IID_PPV_ARGS(&i_shell_link)))); + if (FAILED(hr)) + return; + + EXPECT_TRUE(SUCCEEDED(hr = i_shell_link.As(&i_persist_file))); + if (FAILED(hr)) + return; + + // Load the shortcut. + EXPECT_TRUE( + SUCCEEDED(hr = i_persist_file->Load(shortcut_path.value().c_str(), 0))) + << "Failed to load shortcut at " << shortcut_path.value(); + if (FAILED(hr)) + return; + + if (properties.options & ShortcutProperties::PROPERTIES_TARGET) { + EXPECT_TRUE(SUCCEEDED( + i_shell_link->GetPath(read_target, MAX_PATH, NULL, SLGP_SHORTPATH))); + ValidatePathsAreEqual(properties.target, FilePath(read_target)); + } + + if (properties.options & ShortcutProperties::PROPERTIES_WORKING_DIR) { + EXPECT_TRUE(SUCCEEDED( + i_shell_link->GetWorkingDirectory(read_working_dir, MAX_PATH))); + ValidatePathsAreEqual(properties.working_dir, FilePath(read_working_dir)); + } + + if (properties.options & ShortcutProperties::PROPERTIES_ARGUMENTS) { + EXPECT_TRUE( + SUCCEEDED(i_shell_link->GetArguments(read_arguments, MAX_PATH))); + EXPECT_EQ(properties.arguments, read_arguments); + } + + if (properties.options & ShortcutProperties::PROPERTIES_DESCRIPTION) { + EXPECT_TRUE( + SUCCEEDED(i_shell_link->GetDescription(read_description, MAX_PATH))); + EXPECT_EQ(properties.description, read_description); + } + + if (properties.options & ShortcutProperties::PROPERTIES_ICON) { + EXPECT_TRUE(SUCCEEDED( + i_shell_link->GetIconLocation(read_icon, MAX_PATH, &read_icon_index))); + ValidatePathsAreEqual(properties.icon, FilePath(read_icon)); + EXPECT_EQ(properties.icon_index, read_icon_index); + } + + Microsoft::WRL::ComPtr<IPropertyStore> property_store; + EXPECT_TRUE(SUCCEEDED(hr = i_shell_link.As(&property_store))); + if (FAILED(hr)) + return; + + if (properties.options & ShortcutProperties::PROPERTIES_APP_ID) { + ScopedPropVariant pv_app_id; + EXPECT_EQ(S_OK, property_store->GetValue(PKEY_AppUserModel_ID, + pv_app_id.Receive())); + switch (pv_app_id.get().vt) { + case VT_EMPTY: + EXPECT_TRUE(properties.app_id.empty()); + break; + case VT_LPWSTR: + EXPECT_EQ(properties.app_id, pv_app_id.get().pwszVal); + break; + default: + ADD_FAILURE() << "Unexpected variant type: " << pv_app_id.get().vt; + } + } + + if (properties.options & ShortcutProperties::PROPERTIES_DUAL_MODE) { + ScopedPropVariant pv_dual_mode; + EXPECT_EQ(S_OK, property_store->GetValue(PKEY_AppUserModel_IsDualMode, + pv_dual_mode.Receive())); + switch (pv_dual_mode.get().vt) { + case VT_EMPTY: + EXPECT_FALSE(properties.dual_mode); + break; + case VT_BOOL: + EXPECT_EQ(properties.dual_mode, + static_cast<bool>(pv_dual_mode.get().boolVal)); + break; + default: + ADD_FAILURE() << "Unexpected variant type: " << pv_dual_mode.get().vt; + } + } +} + +} // namespace win +} // namespace base diff --git a/chromium/base/test/test_shortcut_win.h b/chromium/base/test/test_shortcut_win.h new file mode 100644 index 00000000000..b828e8bb1f3 --- /dev/null +++ b/chromium/base/test/test_shortcut_win.h @@ -0,0 +1,30 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SHORTCUT_WIN_H_ +#define BASE_TEST_TEST_SHORTCUT_WIN_H_ + +#include "base/files/file_path.h" +#include "base/win/shortcut.h" + +// Windows shortcut functions used only by tests. 
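+//
+// A minimal usage sketch, for illustration only. The property setters are
+// assumed from base/win/shortcut.h (they are not declared in this header) and
+// the paths are hypothetical:
+//
+//   base::win::ShortcutProperties expected;
+//   expected.set_target(FilePath(L"C:\\target\\app.exe"));
+//   expected.set_arguments(L"--flag");
+//   base::win::ValidateShortcut(FilePath(L"C:\\links\\app.lnk"), expected);
+//   // Mismatches are reported as gtest failures rather than return values.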
+ +namespace base { +namespace win { + +// Validates |actual_path|'s LongPathName case-insensitively matches +// |expected_path|'s LongPathName. +void ValidatePathsAreEqual(const base::FilePath& expected_path, + const base::FilePath& actual_path); + +// Validates that a shortcut exists at |shortcut_path| with the expected +// |properties|. +// Logs gtest failures on failed verifications. +void ValidateShortcut(const FilePath& shortcut_path, + const ShortcutProperties& properties); + +} // namespace win +} // namespace base + +#endif // BASE_TEST_TEST_SHORTCUT_WIN_H_ diff --git a/chromium/base/test/test_simple_task_runner.cc b/chromium/base/test/test_simple_task_runner.cc new file mode 100644 index 00000000000..3e5d70ee4cf --- /dev/null +++ b/chromium/base/test/test_simple_task_runner.cc @@ -0,0 +1,103 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_simple_task_runner.h" + +#include <utility> + +#include "base/check.h" +#include "base/memory/ptr_util.h" +#include "base/threading/thread_task_runner_handle.h" + +namespace base { + +TestSimpleTaskRunner::TestSimpleTaskRunner() = default; + +TestSimpleTaskRunner::~TestSimpleTaskRunner() = default; + +bool TestSimpleTaskRunner::PostDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) { + AutoLock auto_lock(lock_); + pending_tasks_.push_back(TestPendingTask(from_here, std::move(task), + TimeTicks(), delay, + TestPendingTask::NESTABLE)); + return true; +} + +bool TestSimpleTaskRunner::PostNonNestableDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) { + AutoLock auto_lock(lock_); + pending_tasks_.push_back(TestPendingTask(from_here, std::move(task), + TimeTicks(), delay, + TestPendingTask::NON_NESTABLE)); + return true; +} + +// TODO(gab): Use SequenceToken here to differentiate between tasks running in +// the scope of this TestSimpleTaskRunner and other task runners sharing this +// thread. http://crbug.com/631186 +bool TestSimpleTaskRunner::RunsTasksInCurrentSequence() const { + return thread_ref_ == PlatformThread::CurrentRef(); +} + +base::circular_deque<TestPendingTask> TestSimpleTaskRunner::TakePendingTasks() { + AutoLock auto_lock(lock_); + return std::move(pending_tasks_); +} + +size_t TestSimpleTaskRunner::NumPendingTasks() const { + AutoLock auto_lock(lock_); + return pending_tasks_.size(); +} + +bool TestSimpleTaskRunner::HasPendingTask() const { + AutoLock auto_lock(lock_); + return !pending_tasks_.empty(); +} + +base::TimeDelta TestSimpleTaskRunner::NextPendingTaskDelay() const { + AutoLock auto_lock(lock_); + return pending_tasks_.front().GetTimeToRun() - base::TimeTicks(); +} + +base::TimeDelta TestSimpleTaskRunner::FinalPendingTaskDelay() const { + AutoLock auto_lock(lock_); + return pending_tasks_.back().GetTimeToRun() - base::TimeTicks(); +} + +void TestSimpleTaskRunner::ClearPendingTasks() { + AutoLock auto_lock(lock_); + pending_tasks_.clear(); +} + +void TestSimpleTaskRunner::RunPendingTasks() { + DCHECK(RunsTasksInCurrentSequence()); + + // Swap with a local variable to avoid re-entrancy problems. + base::circular_deque<TestPendingTask> tasks_to_run; + { + AutoLock auto_lock(lock_); + tasks_to_run.swap(pending_tasks_); + } + + // Multiple test task runners can share the same thread for determinism in + // unit tests. Make sure this TestSimpleTaskRunner's tasks run in its scope. 
+ ScopedClosureRunner undo_override; + if (!ThreadTaskRunnerHandle::IsSet() || + ThreadTaskRunnerHandle::Get() != this) { + undo_override = ThreadTaskRunnerHandle::OverrideForTesting(this); + } + + for (auto& task : tasks_to_run) + std::move(task.task).Run(); +} + +void TestSimpleTaskRunner::RunUntilIdle() { + while (HasPendingTask()) + RunPendingTasks(); +} + +} // namespace base diff --git a/chromium/base/test/test_simple_task_runner.h b/chromium/base/test/test_simple_task_runner.h new file mode 100644 index 00000000000..245ca88c1ce --- /dev/null +++ b/chromium/base/test/test_simple_task_runner.h @@ -0,0 +1,97 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_ +#define BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_ + +#include "base/callback.h" +#include "base/compiler_specific.h" +#include "base/containers/circular_deque.h" +#include "base/macros.h" +#include "base/single_thread_task_runner.h" +#include "base/synchronization/lock.h" +#include "base/test/test_pending_task.h" +#include "base/threading/platform_thread.h" + +namespace base { + +class TimeDelta; + +// ATTENTION: Prefer using base::test::TaskEnvironment and a task runner +// obtained from base/task/post_task.h over this class. This class isn't as +// "simple" as it seems specifically because it runs tasks in a surprising order +// (delays aren't respected and nesting doesn't behave as usual). Should you +// prefer to flush all tasks regardless of delays, +// TaskEnvironment::TimeSource::MOCK_TIME and +// TaskEnvironment::FastForwardUntilNoTasksRemain() have you covered. +// +// TestSimpleTaskRunner is a simple TaskRunner implementation that can +// be used for testing. It implements SingleThreadTaskRunner as that +// interface implements SequencedTaskRunner, which in turn implements +// TaskRunner, so TestSimpleTaskRunner can be passed in to a function +// that accepts any *TaskRunner object. +// +// TestSimpleTaskRunner has the following properties which make it simple: +// +// - Tasks are simply stored in a queue in FIFO order, ignoring delay +// and nestability. +// - Tasks aren't guaranteed to be destroyed immediately after +// they're run. +// +// However, TestSimpleTaskRunner allows for reentrancy, in that it +// handles the running of tasks that in turn call back into itself +// (e.g., to post more tasks). +// +// Note that, like any TaskRunner, TestSimpleTaskRunner is +// ref-counted. +class TestSimpleTaskRunner : public SingleThreadTaskRunner { + public: + TestSimpleTaskRunner(); + + // SingleThreadTaskRunner implementation. + bool PostDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) override; + bool PostNonNestableDelayedTask(const Location& from_here, + OnceClosure task, + TimeDelta delay) override; + + bool RunsTasksInCurrentSequence() const override; + + base::circular_deque<TestPendingTask> TakePendingTasks(); + size_t NumPendingTasks() const; + bool HasPendingTask() const; + base::TimeDelta NextPendingTaskDelay() const; + base::TimeDelta FinalPendingTaskDelay() const; + + // Clears the queue of pending tasks without running them. + void ClearPendingTasks(); + + // Runs each current pending task in order and clears the queue. Tasks posted + // by the tasks that run within this call do not run within this call. Can + // only be called on the thread that created this TestSimpleTaskRunner. 
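+  //
+  // Minimal usage sketch (illustrative only; DoSomething is a hypothetical
+  // placeholder, not part of this API):
+  //   scoped_refptr<TestSimpleTaskRunner> runner(new TestSimpleTaskRunner());
+  //   runner->PostTask(FROM_HERE, base::BindOnce(&DoSomething));
+  //   EXPECT_TRUE(runner->HasPendingTask());
+  //   runner->RunPendingTasks();  // DoSomething() runs here, synchronously.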
+ void RunPendingTasks(); + + // Runs pending tasks until the queue is empty. Can only be called on the + // thread that created this TestSimpleTaskRunner. + void RunUntilIdle(); + + protected: + ~TestSimpleTaskRunner() override; + + private: + // Thread on which this was instantiated. + const PlatformThreadRef thread_ref_ = PlatformThread::CurrentRef(); + + // Synchronizes access to |pending_tasks_|. + mutable Lock lock_; + + base::circular_deque<TestPendingTask> pending_tasks_; + + DISALLOW_COPY_AND_ASSIGN(TestSimpleTaskRunner); +}; + +} // namespace base + +#endif // BASE_TEST_TEST_SIMPLE_TASK_RUNNER_H_ diff --git a/chromium/base/test/test_suite.cc b/chromium/base/test/test_suite.cc new file mode 100644 index 00000000000..7aefd46640a --- /dev/null +++ b/chromium/base/test/test_suite.cc @@ -0,0 +1,667 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_suite.h" + +#include <signal.h> + +#include <memory> + +#include "base/at_exit.h" +#include "base/base_paths.h" +#include "base/base_switches.h" +#include "base/bind.h" +#include "base/command_line.h" +#include "base/debug/debugger.h" +#include "base/debug/profiler.h" +#include "base/debug/stack_trace.h" +#include "base/feature_list.h" +#include "base/files/file_path.h" +#include "base/files/file_util.h" +#include "base/i18n/icu_util.h" +#include "base/logging.h" +#include "base/macros.h" +#include "base/memory/ptr_util.h" +#include "base/no_destructor.h" +#include "base/path_service.h" +#include "base/process/launch.h" +#include "base/process/memory.h" +#include "base/process/process.h" +#include "base/process/process_handle.h" +#include "base/task/thread_pool/thread_pool_instance.h" +#include "base/test/gtest_xml_unittest_result_printer.h" +#include "base/test/gtest_xml_util.h" +#include "base/test/icu_test_util.h" +#include "base/test/launcher/unit_test_launcher.h" +#include "base/test/mock_entropy_provider.h" +#include "base/test/multiprocess_test.h" +#include "base/test/scoped_feature_list.h" +#include "base/test/scoped_run_loop_timeout.h" +#include "base/test/test_switches.h" +#include "base/test/test_timeouts.h" +#include "base/threading/platform_thread.h" +#include "base/time/time.h" +#include "build/build_config.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "testing/multiprocess_func_list.h" + +#if defined(OS_MACOSX) +#include "base/mac/scoped_nsautorelease_pool.h" +#include "base/process/port_provider_mac.h" +#if defined(OS_IOS) +#include "base/test/test_listener_ios.h" +#endif // OS_IOS +#endif // OS_MACOSX + +#include "base/i18n/rtl.h" +#if !defined(OS_IOS) +#include "base/strings/string_util.h" +#include "third_party/icu/source/common/unicode/uloc.h" +#endif + +#if defined(OS_ANDROID) +#include "base/test/test_support_android.h" +#endif + +#if defined(OS_IOS) +#include "base/test/test_support_ios.h" +#endif + +#if defined(OS_LINUX) +#include "base/test/fontconfig_util_linux.h" +#endif + +#if defined(OS_FUCHSIA) +#include "base/base_paths_fuchsia.h" +#endif + +#if defined(OS_WIN) && defined(_DEBUG) +#include <crtdbg.h> +#endif + +namespace base { + +namespace { + +// Returns true if the test is marked as "MAYBE_". +// When using different prefixes depending on platform, we use MAYBE_ and +// preprocessor directives to replace MAYBE_ with the target prefix. 
+bool IsMarkedMaybe(const testing::TestInfo& test) {
+  return strncmp(test.name(), "MAYBE_", 6) == 0;
+}
+
+class DisableMaybeTests : public testing::EmptyTestEventListener {
+ public:
+  void OnTestStart(const testing::TestInfo& test_info) override {
+    ASSERT_FALSE(IsMarkedMaybe(test_info))
+        << "Probably the OS #ifdefs don't include all of the necessary "
+           "platforms.\nPlease ensure that no tests have the MAYBE_ prefix "
+           "after the code is preprocessed.";
+  }
+};
+
+class ResetCommandLineBetweenTests : public testing::EmptyTestEventListener {
+ public:
+  ResetCommandLineBetweenTests() : old_command_line_(CommandLine::NO_PROGRAM) {}
+
+  void OnTestStart(const testing::TestInfo& test_info) override {
+    old_command_line_ = *CommandLine::ForCurrentProcess();
+  }
+
+  void OnTestEnd(const testing::TestInfo& test_info) override {
+    *CommandLine::ForCurrentProcess() = old_command_line_;
+  }
+
+ private:
+  CommandLine old_command_line_;
+
+  DISALLOW_COPY_AND_ASSIGN(ResetCommandLineBetweenTests);
+};
+
+// Initializes a base::test::ScopedFeatureList for each individual test, which
+// involves a FeatureList and a FieldTrialList, such that unit tests don't need
+// to initialize them manually.
+class FeatureListScopedToEachTest : public testing::EmptyTestEventListener {
+ public:
+  FeatureListScopedToEachTest() = default;
+  ~FeatureListScopedToEachTest() override = default;
+
+  FeatureListScopedToEachTest(const FeatureListScopedToEachTest&) = delete;
+  FeatureListScopedToEachTest& operator=(const FeatureListScopedToEachTest&) =
+      delete;
+
+  void OnTestStart(const testing::TestInfo& test_info) override {
+    field_trial_list_ = std::make_unique<FieldTrialList>(
+        std::make_unique<MockEntropyProvider>());
+
+    const CommandLine* command_line = CommandLine::ForCurrentProcess();
+
+    // Set up a FeatureList instance, so that code using that API will not hit
+    // an error that it's not set. It will be cleared automatically.
+    // TestFeatureForBrowserTest1 and TestFeatureForBrowserTest2 are used in
+    // ContentBrowserTestScopedFeatureListTest to ensure ScopedFeatureList
+    // keeps features from the command line.
+    std::string enabled =
+        command_line->GetSwitchValueASCII(switches::kEnableFeatures);
+    std::string disabled =
+        command_line->GetSwitchValueASCII(switches::kDisableFeatures);
+    enabled += ",TestFeatureForBrowserTest1";
+    disabled += ",TestFeatureForBrowserTest2";
+    scoped_feature_list_.InitFromCommandLine(enabled, disabled);
+
+    // The enable-features and disable-features flags were just slurped into a
+    // FeatureList, so remove them from the command line. Tests should enable
+    // and disable features via the ScopedFeatureList API rather than
+    // command-line flags.
+    CommandLine new_command_line(command_line->GetProgram());
+    CommandLine::SwitchMap switches = command_line->GetSwitches();
+
+    switches.erase(switches::kEnableFeatures);
+    switches.erase(switches::kDisableFeatures);
+
+    for (const auto& iter : switches)
+      new_command_line.AppendSwitchNative(iter.first, iter.second);
+
+    *CommandLine::ForCurrentProcess() = new_command_line;
+  }
+
+  void OnTestEnd(const testing::TestInfo& test_info) override {
+    scoped_feature_list_.Reset();
+    field_trial_list_.reset();
+  }
+
+ private:
+  std::unique_ptr<FieldTrialList> field_trial_list_;
+  test::ScopedFeatureList scoped_feature_list_;
+};
+
+class CheckForLeakedGlobals : public testing::EmptyTestEventListener {
+ public:
+  CheckForLeakedGlobals() = default;
+
+  // Check for leaks in individual tests.
+ void OnTestStart(const testing::TestInfo& test) override { + feature_list_set_before_test_ = FeatureList::GetInstance(); + thread_pool_set_before_test_ = ThreadPoolInstance::Get(); + } + void OnTestEnd(const testing::TestInfo& test) override { + DCHECK_EQ(feature_list_set_before_test_, FeatureList::GetInstance()) + << " in test " << test.test_case_name() << "." << test.name(); + DCHECK_EQ(thread_pool_set_before_test_, ThreadPoolInstance::Get()) + << " in test " << test.test_case_name() << "." << test.name(); + } + + // Check for leaks in test cases (consisting of one or more tests). + void OnTestCaseStart(const testing::TestCase& test_case) override { + feature_list_set_before_case_ = FeatureList::GetInstance(); + thread_pool_set_before_case_ = ThreadPoolInstance::Get(); + } + void OnTestCaseEnd(const testing::TestCase& test_case) override { + DCHECK_EQ(feature_list_set_before_case_, FeatureList::GetInstance()) + << " in case " << test_case.name(); + DCHECK_EQ(thread_pool_set_before_case_, ThreadPoolInstance::Get()) + << " in case " << test_case.name(); + } + + private: + FeatureList* feature_list_set_before_test_ = nullptr; + FeatureList* feature_list_set_before_case_ = nullptr; + ThreadPoolInstance* thread_pool_set_before_test_ = nullptr; + ThreadPoolInstance* thread_pool_set_before_case_ = nullptr; + + DISALLOW_COPY_AND_ASSIGN(CheckForLeakedGlobals); +}; + +// base::Process is not available on iOS +#if !defined(OS_IOS) +class CheckProcessPriority : public testing::EmptyTestEventListener { + public: + CheckProcessPriority() { CHECK(!IsProcessBackgrounded()); } + + void OnTestStart(const testing::TestInfo& test) override { + EXPECT_FALSE(IsProcessBackgrounded()); + } + void OnTestEnd(const testing::TestInfo& test) override { +#if !defined(OS_MACOSX) + // Flakes are found on Mac OS 10.11. See https://crbug.com/931721#c7. + EXPECT_FALSE(IsProcessBackgrounded()); +#endif + } + + private: +#if defined(OS_MACOSX) + // Returns the calling process's task port, ignoring its argument. + class CurrentProcessPortProvider : public PortProvider { + mach_port_t TaskForPid(ProcessHandle process) const override { + // This PortProvider implementation only works for the current process. + CHECK_EQ(process, base::GetCurrentProcessHandle()); + return mach_task_self(); + } + }; +#endif + + bool IsProcessBackgrounded() const { +#if defined(OS_MACOSX) + CurrentProcessPortProvider port_provider; + return Process::Current().IsProcessBackgrounded(&port_provider); +#else + return Process::Current().IsProcessBackgrounded(); +#endif + } + + DISALLOW_COPY_AND_ASSIGN(CheckProcessPriority); +}; +#endif // !defined(OS_IOS) + +class CheckThreadPriority : public testing::EmptyTestEventListener { + public: + CheckThreadPriority(bool check_thread_priority_at_test_end) + : check_thread_priority_at_test_end_(check_thread_priority_at_test_end) { + CHECK_EQ(base::PlatformThread::GetCurrentThreadPriority(), + base::ThreadPriority::NORMAL) + << " -- The thread priority of this process is not the default. This " + "usually indicates nice has been used, which is not supported."; + } + + void OnTestStart(const testing::TestInfo& test) override { + EXPECT_EQ(base::PlatformThread::GetCurrentThreadPriority(), + base::ThreadPriority::NORMAL) + << " -- The thread priority of this process is not the default. 
This " + "usually indicates nice has been used, which is not supported."; + } + void OnTestEnd(const testing::TestInfo& test) override { + if (check_thread_priority_at_test_end_) { + EXPECT_EQ(base::PlatformThread::GetCurrentThreadPriority(), + base::ThreadPriority::NORMAL) + << " -- The thread priority of this process is not the default. This " + "usually indicates nice has been used, which is not supported."; + } + } + + private: + const bool check_thread_priority_at_test_end_; + + DISALLOW_COPY_AND_ASSIGN(CheckThreadPriority); +}; + +const std::string& GetProfileName() { + static const NoDestructor<std::string> profile_name([]() { + const CommandLine& command_line = *CommandLine::ForCurrentProcess(); + if (command_line.HasSwitch(switches::kProfilingFile)) + return command_line.GetSwitchValueASCII(switches::kProfilingFile); + else + return std::string("test-profile-{pid}"); + }()); + return *profile_name; +} + +void InitializeLogging() { +#if defined(OS_ANDROID) + InitAndroidTestLogging(); +#else + + FilePath log_filename; + FilePath exe; + PathService::Get(FILE_EXE, &exe); + +#if defined(OS_FUCHSIA) + // Write logfiles to /data, because the default log location alongside the + // executable (/pkg) is read-only. + FilePath data_dir; + PathService::Get(DIR_APP_DATA, &data_dir); + log_filename = data_dir.Append(exe.BaseName()) + .ReplaceExtension(FILE_PATH_LITERAL("log")); +#else + log_filename = exe.ReplaceExtension(FILE_PATH_LITERAL("log")); +#endif // defined(OS_FUCHSIA) + + logging::LoggingSettings settings; + settings.log_file_path = log_filename.value().c_str(); + settings.logging_dest = logging::LOG_TO_ALL; + settings.delete_old = logging::DELETE_OLD_LOG_FILE; + logging::InitLogging(settings); + // We want process and thread IDs because we may have multiple processes. + // Note: temporarily enabled timestamps in an effort to catch bug 6361. + logging::SetLogItems(true, true, true, true); +#endif // !defined(OS_ANDROID) +} + +} // namespace + +int RunUnitTestsUsingBaseTestSuite(int argc, char** argv) { + TestSuite test_suite(argc, argv); + return LaunchUnitTests(argc, argv, + BindOnce(&TestSuite::Run, Unretained(&test_suite))); +} + +TestSuite::TestSuite(int argc, char** argv) { + PreInitialize(); + InitializeFromCommandLine(argc, argv); + // Logging must be initialized before any thread has a chance to call logging + // functions. + InitializeLogging(); +} + +#if defined(OS_WIN) +TestSuite::TestSuite(int argc, wchar_t** argv) { + PreInitialize(); + InitializeFromCommandLine(argc, argv); + // Logging must be initialized before any thread has a chance to call logging + // functions. + InitializeLogging(); +} +#endif // defined(OS_WIN) + +TestSuite::~TestSuite() { + if (initialized_command_line_) + CommandLine::Reset(); +} + +void TestSuite::InitializeFromCommandLine(int argc, char** argv) { + initialized_command_line_ = CommandLine::Init(argc, argv); + testing::InitGoogleTest(&argc, argv); + testing::InitGoogleMock(&argc, argv); + +#if defined(OS_IOS) + InitIOSRunHook(this, argc, argv); +#endif +} + +#if defined(OS_WIN) +void TestSuite::InitializeFromCommandLine(int argc, wchar_t** argv) { + // Windows CommandLine::Init ignores argv anyway. 
+ initialized_command_line_ = CommandLine::Init(argc, NULL); + testing::InitGoogleTest(&argc, argv); + testing::InitGoogleMock(&argc, argv); +} +#endif // defined(OS_WIN) + +void TestSuite::PreInitialize() { + DCHECK(!is_initialized_); + +#if defined(OS_WIN) + testing::GTEST_FLAG(catch_exceptions) = false; +#endif + EnableTerminationOnHeapCorruption(); +#if defined(OS_LINUX) && defined(USE_AURA) + // When calling native char conversion functions (e.g wrctomb) we need to + // have the locale set. In the absence of such a call the "C" locale is the + // default. In the gtk code (below) gtk_init() implicitly sets a locale. + setlocale(LC_ALL, ""); + // We still need number to string conversions to be locale insensitive. + setlocale(LC_NUMERIC, "C"); +#endif // defined(OS_LINUX) && defined(USE_AURA) + + // On Android, AtExitManager is created in + // testing/android/native_test_wrapper.cc before main() is called. +#if !defined(OS_ANDROID) + at_exit_manager_.reset(new AtExitManager); +#endif + + // Don't add additional code to this function. Instead add it to + // Initialize(). See bug 6436. +} + +void TestSuite::AddTestLauncherResultPrinter() { + // Only add the custom printer if requested. + if (!CommandLine::ForCurrentProcess()->HasSwitch( + switches::kTestLauncherOutput)) { + return; + } + + FilePath output_path(CommandLine::ForCurrentProcess()->GetSwitchValuePath( + switches::kTestLauncherOutput)); + + // Do not add the result printer if output path already exists. It's an + // indicator there is a process printing to that file, and we're likely + // its child. Do not clobber the results in that case. + if (PathExists(output_path)) { + LOG(WARNING) << "Test launcher output path " << output_path.AsUTF8Unsafe() + << " exists. Not adding test launcher result printer."; + return; + } + + printer_ = new XmlUnitTestResultPrinter; + CHECK(printer_->Initialize(output_path)) + << "Output path is " << output_path.AsUTF8Unsafe() + << " and PathExists(output_path) is " << PathExists(output_path); + testing::TestEventListeners& listeners = + testing::UnitTest::GetInstance()->listeners(); + listeners.Append(printer_); +} + +// Don't add additional code to this method. Instead add it to +// Initialize(). See bug 6436. +int TestSuite::Run() { +#if defined(OS_IOS) + RunTestsFromIOSApp(); +#endif + +#if defined(OS_MACOSX) + mac::ScopedNSAutoreleasePool scoped_pool; +#endif + + Initialize(); + std::string client_func = + CommandLine::ForCurrentProcess()->GetSwitchValueASCII( + switches::kTestChildProcess); + + // Check to see if we are being run as a client process. + if (!client_func.empty()) + return multi_process_function_list::InvokeChildProcessTest(client_func); +#if defined(OS_IOS) + test_listener_ios::RegisterTestEndListener(); +#endif + + int result = RUN_ALL_TESTS(); + +#if defined(OS_MACOSX) + // This MUST happen before Shutdown() since Shutdown() tears down + // objects (such as NotificationService::current()) that Cocoa + // objects use to remove themselves as observers. 
+ scoped_pool.Recycle(); +#endif + + Shutdown(); + + return result; +} + +void TestSuite::DisableCheckForLeakedGlobals() { + DCHECK(!is_initialized_); + check_for_leaked_globals_ = false; +} + +void TestSuite::DisableCheckForThreadAndProcessPriority() { + DCHECK(!is_initialized_); + check_for_thread_and_process_priority_ = false; +} + +void TestSuite::DisableCheckForThreadPriorityAtTestEnd() { + DCHECK(!is_initialized_); + check_for_thread_priority_at_test_end_ = false; +} + +void TestSuite::UnitTestAssertHandler(const char* file, + int line, + const StringPiece summary, + const StringPiece stack_trace) { +#if defined(OS_ANDROID) + // Correlating test stdio with logcat can be difficult, so we emit this + // helpful little hint about what was running. Only do this for Android + // because other platforms don't separate out the relevant logs in the same + // way. + const ::testing::TestInfo* const test_info = + ::testing::UnitTest::GetInstance()->current_test_info(); + if (test_info) { + LOG(ERROR) << "Currently running: " << test_info->test_case_name() << "." + << test_info->name(); + fflush(stderr); + } +#endif // defined(OS_ANDROID) + + // XmlUnitTestResultPrinter inherits gtest format, where assert has summary + // and message. In GTest, summary is just a logged text, and message is a + // logged text, concatenated with stack trace of assert. + // Concatenate summary and stack_trace here, to pass it as a message. + if (printer_) { + const std::string summary_str = summary.as_string(); + const std::string stack_trace_str = summary_str + stack_trace.as_string(); + printer_->OnAssert(file, line, summary_str, stack_trace_str); + } + + // The logging system actually prints the message before calling the assert + // handler. Just exit now to avoid printing too many stack traces. + _exit(1); +} + +#if defined(OS_WIN) +namespace { + +// Handlers for invalid parameter, pure call, and abort. They generate a +// breakpoint to ensure that we get a call stack on these failures. +// These functions should be written to be unique in order to avoid confusing +// call stacks from /OPT:ICF function folding. Printing a unique message or +// returning a unique value will do this. Note that for best results they need +// to be unique from *all* functions in Chrome. +void InvalidParameter(const wchar_t* expression, + const wchar_t* function, + const wchar_t* file, + unsigned int line, + uintptr_t reserved) { + // CRT printed message is sufficient. + __debugbreak(); + _exit(1); +} + +void PureCall() { + fprintf(stderr, "Pure-virtual function call. Terminating.\n"); + __debugbreak(); + _exit(1); +} + +void AbortHandler(int signal) { + // Print EOL after the CRT abort message. + fprintf(stderr, "\n"); + __debugbreak(); +} + +} // namespace +#endif + +void TestSuite::SuppressErrorDialogs() { +#if defined(OS_WIN) + UINT new_flags = + SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX; + + // Preserve existing error mode, as discussed at + // http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx + UINT existing_flags = SetErrorMode(new_flags); + SetErrorMode(existing_flags | new_flags); + +#if defined(_DEBUG) + // Suppress the "Debug Assertion Failed" dialog. + // TODO(hbono): remove this code when gtest has it. 
+ // http://groups.google.com/d/topic/googletestframework/OjuwNlXy5ac/discussion + _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); + _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); + _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR); + _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); +#endif // defined(_DEBUG) + + // See crbug.com/783040 for test code to trigger all of these failures. + _set_invalid_parameter_handler(InvalidParameter); + _set_purecall_handler(PureCall); + signal(SIGABRT, AbortHandler); +#endif // defined(OS_WIN) +} + +void TestSuite::Initialize() { + DCHECK(!is_initialized_); + + test::ScopedRunLoopTimeout::SetAddGTestFailureOnTimeout(); + + const CommandLine* command_line = CommandLine::ForCurrentProcess(); +#if !defined(OS_IOS) + if (command_line->HasSwitch(switches::kWaitForDebugger)) { + debug::WaitForDebugger(60, true); + } +#endif + +#if defined(OS_IOS) + InitIOSTestMessageLoop(); +#endif // OS_IOS + +#if defined(OS_ANDROID) + InitAndroidTestMessageLoop(); +#endif // else defined(OS_ANDROID) + + CHECK(debug::EnableInProcessStackDumping()); +#if defined(OS_WIN) + RouteStdioToConsole(true); + // Make sure we run with high resolution timer to minimize differences + // between production code and test code. + Time::EnableHighResolutionTimer(true); +#endif // defined(OS_WIN) + + // In some cases, we do not want to see standard error dialogs. + if (!debug::BeingDebugged() && + !command_line->HasSwitch("show-error-dialogs")) { + SuppressErrorDialogs(); + debug::SetSuppressDebugUI(true); + assert_handler_ = std::make_unique<logging::ScopedLogAssertHandler>( + BindRepeating(&TestSuite::UnitTestAssertHandler, Unretained(this))); + } + + test::InitializeICUForTesting(); + + // A number of tests only work if the locale is en_US. This can be an issue + // on all platforms. To fix this we force the default locale to en_US. This + // does not affect tests that explicitly overrides the locale for testing. + // TODO(jshin): Should we set the locale via an OS X locale API here? + i18n::SetICUDefaultLocale("en_US"); + +#if defined(OS_LINUX) + SetUpFontconfig(); +#endif + + // Add TestEventListeners to enforce certain properties across tests. + testing::TestEventListeners& listeners = + testing::UnitTest::GetInstance()->listeners(); + listeners.Append(new DisableMaybeTests); + listeners.Append(new ResetCommandLineBetweenTests); + listeners.Append(new FeatureListScopedToEachTest); + if (check_for_leaked_globals_) + listeners.Append(new CheckForLeakedGlobals); + if (check_for_thread_and_process_priority_) { +#if !defined(OS_ANDROID) + // TODO(https://crbug.com/931706): Check thread priority on Android. + listeners.Append( + new CheckThreadPriority(check_for_thread_priority_at_test_end_)); +#endif +#if !defined(OS_IOS) + listeners.Append(new CheckProcessPriority); +#endif + } + + AddTestLauncherResultPrinter(); + + TestTimeouts::Initialize(); + + trace_to_file_.BeginTracingFromCommandLineOptions(); + + debug::StartProfiling(GetProfileName()); + + debug::VerifyDebugger(); + + is_initialized_ = true; +} + +void TestSuite::Shutdown() { + DCHECK(is_initialized_); + debug::StopProfiling(); +} + +} // namespace base diff --git a/chromium/base/test/test_suite.h b/chromium/base/test/test_suite.h new file mode 100644 index 00000000000..372c5f58a8a --- /dev/null +++ b/chromium/base/test/test_suite.h @@ -0,0 +1,110 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SUITE_H_ +#define BASE_TEST_TEST_SUITE_H_ + +// Defines a basic test suite framework for running gtest based tests. You can +// instantiate this class in your main function and call its Run method to run +// any gtest based tests that are linked into your executable. + +#include <memory> +#include <string> + +#include "base/at_exit.h" +#include "base/logging.h" +#include "base/macros.h" +#include "base/test/trace_to_file.h" +#include "build/build_config.h" + +namespace testing { +class TestInfo; +} + +namespace base { + +class XmlUnitTestResultPrinter; + +// Instantiates TestSuite, runs it and returns exit code. +int RunUnitTestsUsingBaseTestSuite(int argc, char** argv); + +class TestSuite { + public: + // Match function used by the GetTestCount method. + typedef bool (*TestMatch)(const testing::TestInfo&); + + TestSuite(int argc, char** argv); +#if defined(OS_WIN) + TestSuite(int argc, wchar_t** argv); +#endif // defined(OS_WIN) + virtual ~TestSuite(); + + int Run(); + + // Disables checks for thread and process priority at the beginning and end of + // each test. Most tests should not use this. + void DisableCheckForThreadAndProcessPriority(); + + // Disables checks for thread priority at the end of each test (still checks + // at the beginning of each test). This should be used for tests that run in + // their own process and should start with normal priorities but are allowed + // to end with different priorities. + void DisableCheckForThreadPriorityAtTestEnd(); + + // Disables checks for certain global objects being leaked across tests. + void DisableCheckForLeakedGlobals(); + + protected: + // By default fatal log messages (e.g. from DCHECKs) result in error dialogs + // which gum up buildbots. Use a minimalistic assert handler which just + // terminates the process. + void UnitTestAssertHandler(const char* file, + int line, + const base::StringPiece summary, + const base::StringPiece stack_trace); + + // Disable crash dialogs so that it doesn't gum up the buildbot + virtual void SuppressErrorDialogs(); + + // Override these for custom initialization and shutdown handling. Use these + // instead of putting complex code in your constructor/destructor. + + virtual void Initialize(); + virtual void Shutdown(); + + // Make sure that we setup an AtExitManager so Singleton objects will be + // destroyed. + std::unique_ptr<base::AtExitManager> at_exit_manager_; + + private: + void AddTestLauncherResultPrinter(); + + void InitializeFromCommandLine(int argc, char** argv); +#if defined(OS_WIN) + void InitializeFromCommandLine(int argc, wchar_t** argv); +#endif // defined(OS_WIN) + + // Basic initialization for the test suite happens here. 
+ void PreInitialize(); + + test::TraceToFile trace_to_file_; + + bool initialized_command_line_ = false; + + XmlUnitTestResultPrinter* printer_ = nullptr; + + std::unique_ptr<logging::ScopedLogAssertHandler> assert_handler_; + + bool check_for_leaked_globals_ = true; + bool check_for_thread_and_process_priority_ = true; + bool check_for_thread_priority_at_test_end_ = true; + + bool is_initialized_ = false; + + DISALLOW_COPY_AND_ASSIGN(TestSuite); +}; + +} // namespace base + +#endif // BASE_TEST_TEST_SUITE_H_ diff --git a/chromium/base/test/test_support_android.cc b/chromium/base/test/test_support_android.cc new file mode 100644 index 00000000000..0a8a5b2db18 --- /dev/null +++ b/chromium/base/test/test_support_android.cc @@ -0,0 +1,223 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include <stdarg.h> +#include <string.h> + +#include "base/android/path_utils.h" +#include "base/files/file_path.h" +#include "base/logging.h" +#include "base/macros.h" +#include "base/memory/singleton.h" +#include "base/message_loop/message_pump.h" +#include "base/message_loop/message_pump_android.h" +#include "base/path_service.h" +#include "base/synchronization/waitable_event.h" +#include "base/test/multiprocess_test.h" + +namespace { + +base::FilePath* g_test_data_dir = nullptr; + +struct RunState { + RunState(base::MessagePump::Delegate* delegate, int run_depth) + : delegate(delegate), + run_depth(run_depth), + should_quit(false) { + } + + base::MessagePump::Delegate* delegate; + + // Used to count how many Run() invocations are on the stack. + int run_depth; + + // Used to flag that the current Run() invocation should return ASAP. + bool should_quit; +}; + +RunState* g_state = nullptr; + +// A singleton WaitableEvent wrapper so we avoid a busy loop in +// MessagePumpForUIStub. Other platforms use the native event loop which blocks +// when there are no pending messages. +class Waitable { + public: + static Waitable* GetInstance() { + return base::Singleton<Waitable, + base::LeakySingletonTraits<Waitable>>::get(); + } + + // Signals that there are more work to do. + void Signal() { waitable_event_.Signal(); } + + // Blocks until more work is scheduled. + void Block() { waitable_event_.Wait(); } + + void Quit() { + g_state->should_quit = true; + Signal(); + } + + private: + friend struct base::DefaultSingletonTraits<Waitable>; + + Waitable() + : waitable_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC, + base::WaitableEvent::InitialState::NOT_SIGNALED) {} + + base::WaitableEvent waitable_event_; + + DISALLOW_COPY_AND_ASSIGN(Waitable); +}; + +// The MessagePumpForUI implementation for test purpose. +class MessagePumpForUIStub : public base::MessagePumpForUI { + public: + MessagePumpForUIStub() : base::MessagePumpForUI() { Waitable::GetInstance(); } + ~MessagePumpForUIStub() override {} + + bool IsTestImplementation() const override { return true; } + + // In tests, there isn't a native thread, as such RunLoop::Run() should be + // used to run the loop instead of attaching and delegating to the native + // loop. As such, this override ignores the Attach() request. + void Attach(base::MessagePump::Delegate* delegate) override {} + + void Run(base::MessagePump::Delegate* delegate) override { + // The following was based on message_pump_glib.cc, except we're using a + // WaitableEvent since there are no native message loop to use. + RunState state(delegate, g_state ? 
g_state->run_depth + 1 : 1); + + RunState* previous_state = g_state; + g_state = &state; + + // When not nested we can use the real implementation, otherwise fall back + // to the stub implementation. + if (g_state->run_depth > 1) { + RunNested(delegate); + } else { + MessagePumpForUI::Run(delegate); + } + + g_state = previous_state; + } + + void RunNested(base::MessagePump::Delegate* delegate) { + bool more_work_is_plausible = true; + + for (;;) { + if (!more_work_is_plausible) { + Waitable::GetInstance()->Block(); + if (g_state->should_quit) + break; + } + + Delegate::NextWorkInfo next_work_info = g_state->delegate->DoWork(); + more_work_is_plausible = next_work_info.is_immediate(); + if (g_state->should_quit) + break; + + if (more_work_is_plausible) + continue; + + more_work_is_plausible = g_state->delegate->DoIdleWork(); + if (g_state->should_quit) + break; + + more_work_is_plausible |= !next_work_info.delayed_run_time.is_max(); + } + } + + void Quit() override { + CHECK(g_state); + if (g_state->run_depth > 1) { + Waitable::GetInstance()->Quit(); + } else { + MessagePumpForUI::Quit(); + } + } + + void ScheduleWork() override { + if (g_state && g_state->run_depth > 1) { + Waitable::GetInstance()->Signal(); + } else { + MessagePumpForUI::ScheduleWork(); + } + } + + void ScheduleDelayedWork(const base::TimeTicks& delayed_work_time) override { + if (g_state && g_state->run_depth > 1) { + Waitable::GetInstance()->Signal(); + } else { + MessagePumpForUI::ScheduleDelayedWork(delayed_work_time); + } + } +}; + +std::unique_ptr<base::MessagePump> CreateMessagePumpForUIStub() { + return std::unique_ptr<base::MessagePump>(new MessagePumpForUIStub()); +} + +// Provides the test path for DIR_SOURCE_ROOT and DIR_ANDROID_APP_DATA. +bool GetTestProviderPath(int key, base::FilePath* result) { + switch (key) { + // TODO(agrieve): Stop overriding DIR_ANDROID_APP_DATA. + // https://crbug.com/617734 + // Instead DIR_ASSETS should be used to discover assets file location in + // tests. + case base::DIR_ANDROID_APP_DATA: + case base::DIR_ASSETS: + case base::DIR_SOURCE_ROOT: + CHECK(g_test_data_dir != nullptr); + *result = *g_test_data_dir; + return true; + default: + return false; + } +} + +void InitPathProvider(int key) { + base::FilePath path; + // If failed to override the key, that means the way has not been registered. + if (GetTestProviderPath(key, &path) && + !base::PathService::Override(key, path)) { + base::PathService::RegisterProvider(&GetTestProviderPath, key, key + 1); + } +} + +} // namespace + +namespace base { + +void InitAndroidTestLogging() { + logging::LoggingSettings settings; + settings.logging_dest = + logging::LOG_TO_SYSTEM_DEBUG_LOG | logging::LOG_TO_STDERR; + logging::InitLogging(settings); + // To view log output with IDs and timestamps use "adb logcat -v threadtime". + logging::SetLogItems(false, // Process ID + false, // Thread ID + false, // Timestamp + false); // Tick count +} + +void InitAndroidTestPaths(const FilePath& test_data_dir) { + if (g_test_data_dir) { + CHECK(test_data_dir == *g_test_data_dir); + return; + } + g_test_data_dir = new FilePath(test_data_dir); + InitPathProvider(DIR_SOURCE_ROOT); + InitPathProvider(DIR_ANDROID_APP_DATA); + InitPathProvider(DIR_ASSETS); +} + +void InitAndroidTestMessageLoop() { + // NOTE something else such as a JNI call may have already overridden the UI + // factory. 
+ if (!MessagePump::IsMessagePumpForUIFactoryOveridden()) + MessagePump::OverrideMessagePumpForUIFactory(&CreateMessagePumpForUIStub); +} + +} // namespace base diff --git a/chromium/base/test/test_support_android.h b/chromium/base/test/test_support_android.h new file mode 100644 index 00000000000..4942e546110 --- /dev/null +++ b/chromium/base/test/test_support_android.h @@ -0,0 +1,25 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SUPPORT_ANDROID_H_ +#define BASE_TEST_TEST_SUPPORT_ANDROID_H_ + +#include "base/base_export.h" + +namespace base { + +class FilePath; + +// Init logging for tests on Android. Logs will be output into Android's logcat. +BASE_EXPORT void InitAndroidTestLogging(); + +// Init path providers for tests on Android. +BASE_EXPORT void InitAndroidTestPaths(const FilePath& test_data_dir); + +// Init the message loop for tests on Android. +BASE_EXPORT void InitAndroidTestMessageLoop(); + +} // namespace base + +#endif // BASE_TEST_TEST_SUPPORT_ANDROID_H_ diff --git a/chromium/base/test/test_support_ios.h b/chromium/base/test/test_support_ios.h new file mode 100644 index 00000000000..9064cb0973f --- /dev/null +++ b/chromium/base/test/test_support_ios.h @@ -0,0 +1,27 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SUPPORT_IOS_H_ +#define BASE_TEST_TEST_SUPPORT_IOS_H_ + +#include "base/test/test_suite.h" + +namespace base { + +// Inits the message loop for tests on iOS. +void InitIOSTestMessageLoop(); + +// Inits the run hook for tests on iOS. +void InitIOSRunHook(TestSuite* suite, int argc, char* argv[]); + +// Launches an iOS app that runs the tests in the suite passed to +// InitIOSRunHook. +void RunTestsFromIOSApp(); + +// Returns true if unittests should be run by the XCTest runnner. +bool ShouldRunIOSUnittestsWithXCTest(); + +} // namespace base + +#endif // BASE_TEST_TEST_SUPPORT_IOS_H_ diff --git a/chromium/base/test/test_support_ios.mm b/chromium/base/test/test_support_ios.mm new file mode 100644 index 00000000000..be949a11ee8 --- /dev/null +++ b/chromium/base/test/test_support_ios.mm @@ -0,0 +1,246 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#import "base/test/test_support_ios.h" + +#import <UIKit/UIKit.h> + +#include "base/check.h" +#include "base/command_line.h" +#include "base/debug/debugger.h" +#include "base/mac/scoped_nsobject.h" +#include "base/message_loop/message_pump.h" +#include "base/message_loop/message_pump_mac.h" +#import "base/test/ios/google_test_runner_delegate.h" +#include "base/test/test_suite.h" +#include "base/test/test_switches.h" +#include "testing/coverage_util_ios.h" + +// Springboard will kill any iOS app that fails to check in after launch within +// a given time. Starting a UIApplication before invoking TestSuite::Run +// prevents this from happening. + +// InitIOSRunHook saves the TestSuite and argc/argv, then invoking +// RunTestsFromIOSApp calls UIApplicationMain(), providing an application +// delegate class: ChromeUnitTestDelegate. The delegate implements +// application:didFinishLaunchingWithOptions: to invoke the TestSuite's Run +// method. 
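+
+// Rough call sequence, sketched here for orientation; it is derived from the
+// code in test_suite.cc and this file, so exact call sites may differ:
+//   TestSuite suite(argc, argv);  // constructor calls InitIOSRunHook(this, ...)
+//   suite.Run();                  // calls RunTestsFromIOSApp()
+//     -> UIApplicationMain(..., @"ChromeUnitTestDelegate")
+//     -> -[ChromeUnitTestDelegate runTests] -> g_test_suite->Run()
+//        (the second Run() skips RunTestsFromIOSApp() via |ran_hook| and
+//        actually runs the gtests).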
+ +// Since the executable isn't likely to be a real iOS UI, the delegate puts up a +// window displaying the app name. If a bunch of apps using MainHook are being +// run in a row, this provides an indication of which one is currently running. + +static base::TestSuite* g_test_suite = NULL; +static int g_argc; +static char** g_argv; + +@interface UIApplication (Testing) +- (void)_terminateWithStatus:(int)status; +@end + +#if TARGET_IPHONE_SIMULATOR +// Xcode 6 introduced behavior in the iOS Simulator where the software +// keyboard does not appear if a hardware keyboard is connected. The following +// declaration allows this behavior to be overriden when the app starts up. +@interface UIKeyboardImpl ++ (instancetype)sharedInstance; +- (void)setAutomaticMinimizationEnabled:(BOOL)enabled; +- (void)setSoftwareKeyboardShownByTouch:(BOOL)enabled; +@end +#endif // TARGET_IPHONE_SIMULATOR + +@interface ChromeUnitTestDelegate : NSObject <GoogleTestRunnerDelegate> { + base::scoped_nsobject<UIWindow> _window; +} +- (void)runTests; +@end + +@implementation ChromeUnitTestDelegate + +- (BOOL)application:(UIApplication *)application + didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { + +#if TARGET_IPHONE_SIMULATOR + // Xcode 6 introduced behavior in the iOS Simulator where the software + // keyboard does not appear if a hardware keyboard is connected. The following + // calls override this behavior by ensuring that the software keyboard is + // always shown. + [[UIKeyboardImpl sharedInstance] setAutomaticMinimizationEnabled:NO]; + [[UIKeyboardImpl sharedInstance] setSoftwareKeyboardShownByTouch:YES]; +#endif // TARGET_IPHONE_SIMULATOR + + CGRect bounds = [[UIScreen mainScreen] bounds]; + + // Yes, this is leaked, it's just to make what's running visible. + _window.reset([[UIWindow alloc] initWithFrame:bounds]); + [_window setBackgroundColor:[UIColor whiteColor]]; + [_window makeKeyAndVisible]; + + // Add a label with the app name. + UILabel* label = [[[UILabel alloc] initWithFrame:bounds] autorelease]; + label.text = [[NSProcessInfo processInfo] processName]; + label.textAlignment = NSTextAlignmentCenter; + [_window addSubview:label]; + + // An NSInternalInconsistencyException is thrown if the app doesn't have a + // root view controller. Set an empty one here. + [_window setRootViewController:[[[UIViewController alloc] init] autorelease]]; + + if ([self shouldRedirectOutputToFile]) + [self redirectOutput]; + + // Queue up the test run. + if (!base::ShouldRunIOSUnittestsWithXCTest()) { + // When running in XCTest mode, XCTest will invoke |runGoogleTest| directly. + // Otherwise, schedule a call to |runTests|. + [self performSelector:@selector(runTests) withObject:nil afterDelay:0.1]; + } + + return YES; +} + +// Returns true if the gtest output should be redirected to a file, then sent +// to NSLog when complete. This redirection is used because gtest only writes +// output to stdout, but results must be written to NSLog in order to show up in +// the device log that is retrieved from the device by the host. +- (BOOL)shouldRedirectOutputToFile { +#if !TARGET_IPHONE_SIMULATOR + // Tests in XCTest mode don't need to redirect output to a file because the + // test result parser analyzes console output. + return !base::ShouldRunIOSUnittestsWithXCTest() && + !base::debug::BeingDebugged(); +#else + return NO; +#endif // TARGET_IPHONE_SIMULATOR +} + +// Returns the path to the directory to store gtest output files. 
+- (NSString*)outputPath {
+  NSArray* searchPath =
+      NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
+                                          NSUserDomainMask,
+                                          YES);
+  CHECK([searchPath count] > 0) << "Failed to get the Documents folder";
+  return [searchPath objectAtIndex:0];
+}
+
+// Returns the path to the file that stdout is redirected to.
+- (NSString*)stdoutPath {
+  return [[self outputPath] stringByAppendingPathComponent:@"stdout.log"];
+}
+
+// Returns the path to the file that stderr is redirected to.
+- (NSString*)stderrPath {
+  return [[self outputPath] stringByAppendingPathComponent:@"stderr.log"];
+}
+
+// Redirects stdout and stderr to files in the Documents folder in the app's
+// sandbox.
+- (void)redirectOutput {
+  freopen([[self stdoutPath] UTF8String], "w+", stdout);
+  freopen([[self stderrPath] UTF8String], "w+", stderr);
+}
+
+// Reads the redirected gtest output from a file and writes it to NSLog.
+- (void)writeOutputToNSLog {
+  // Close the redirected stdout and stderr files so that the content written
+  // to NSLog doesn't end up in these files.
+  fclose(stdout);
+  fclose(stderr);
+  for (NSString* path in @[ [self stdoutPath], [self stderrPath]]) {
+    NSString* content = [NSString stringWithContentsOfFile:path
+                                                  encoding:NSUTF8StringEncoding
+                                                     error:NULL];
+    NSArray* lines = [content componentsSeparatedByCharactersInSet:
+        [NSCharacterSet newlineCharacterSet]];
+
+    NSLog(@"Writing contents of %@ to NSLog", path);
+    for (NSString* line in lines) {
+      NSLog(@"%@", line);
+    }
+  }
+}
+
+- (BOOL)supportsRunningGoogleTests {
+  return base::ShouldRunIOSUnittestsWithXCTest();
+}
+
+- (int)runGoogleTests {
+  coverage_util::ConfigureCoverageReportPath();
+
+  int exitStatus = g_test_suite->Run();
+
+  if ([self shouldRedirectOutputToFile])
+    [self writeOutputToNSLog];
+
+  return exitStatus;
+}
+
+- (void)runTests {
+  DCHECK(!base::ShouldRunIOSUnittestsWithXCTest());
+
+  int exitStatus = [self runGoogleTests];
+
+  // If a test app is too fast, it will exit before Instruments has had a
+  // chance to initialize and no test results will be seen.
+  [NSThread sleepUntilDate:[NSDate dateWithTimeIntervalSinceNow:2.0]];
+  _window.reset();
+
+  // Use the hidden selector to try and cleanly take down the app (otherwise
+  // things can think the app crashed even on a zero exit status).
+  UIApplication* application = [UIApplication sharedApplication];
+  [application _terminateWithStatus:exitStatus];
+
+  exit(exitStatus);
+}
+
+@end
+
+namespace {
+
+std::unique_ptr<base::MessagePump> CreateMessagePumpForUIForTests() {
+  // A basic MessagePump will do quite nicely in tests.
+  return std::unique_ptr<base::MessagePump>(new base::MessagePumpCFRunLoop());
+}
+
+}  // namespace
+
+namespace base {
+
+void InitIOSTestMessageLoop() {
+  MessagePump::OverrideMessagePumpForUIFactory(&CreateMessagePumpForUIForTests);
+}
+
+void InitIOSRunHook(TestSuite* suite, int argc, char* argv[]) {
+  g_test_suite = suite;
+  g_argc = argc;
+  g_argv = argv;
+}
+
+void RunTestsFromIOSApp() {
+  // When TestSuite::Run is invoked it calls RunTestsFromIOSApp(). On the first
+  // invocation, this method fires up an iOS app via UIApplicationMain. Since
+  // UIApplicationMain does not return until the app exits, control does not
+  // return to the initial TestSuite::Run invocation, so the app invokes
+  // TestSuite::Run a second time, and since |ran_hook| is true at this point,
+  // this method is a no-op and control returns to TestSuite::Run so that tests
+  // are executed. Once the app exits, RunTestsFromIOSApp calls exit() so that
+  // control is not returned to the initial invocation of TestSuite::Run.
+  static bool ran_hook = false;
+  if (!ran_hook) {
+    ran_hook = true;
+    @autoreleasepool {
+      int exit_status =
+          UIApplicationMain(g_argc, g_argv, nil, @"ChromeUnitTestDelegate");
+      exit(exit_status);
+    }
+  }
+}
+
+bool ShouldRunIOSUnittestsWithXCTest() {
+  return base::CommandLine::ForCurrentProcess()->HasSwitch(
+      switches::kEnableRunIOSUnittestsWithXCTest);
+}
+
+}  // namespace base
diff --git a/chromium/base/test/test_switches.cc b/chromium/base/test/test_switches.cc
new file mode 100644
index 00000000000..ec022ced181
--- /dev/null
+++ b/chromium/base/test/test_switches.cc
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/test_switches.h"
+
+// Flag to show the help message.
+const char switches::kHelpFlag[] = "help";
+
+// Flag to run all tests and the launcher in a single process. Useful for
+// debugging a specific test in a debugger.
+const char switches::kSingleProcessTests[] = "single-process-tests";
+
+// Maximum number of tests to run in a single batch.
+const char switches::kTestLauncherBatchLimit[] = "test-launcher-batch-limit";
+
+// Sets defaults desirable for the continuous integration bots, e.g. parallel
+// test execution and test retries.
+const char switches::kTestLauncherBotMode[] =
+    "test-launcher-bot-mode";
+
+// Makes it possible to debug the launcher itself. By default the launcher
+// automatically switches to single process mode when it detects the presence
+// of a debugger.
+const char switches::kTestLauncherDebugLauncher[] =
+    "test-launcher-debug-launcher";
+
+// Force running all requested tests and retries even if too many test errors
+// occur.
+const char switches::kTestLauncherForceRunBrokenTests[] =
+    "test-launcher-force-run-broken-tests";
+
+// List of paths to files (separated by ';') containing test filters (one
+// pattern per line).
+const char switches::kTestLauncherFilterFile[] = "test-launcher-filter-file";
+
+// Whether the test launcher should launch in "interactive mode", which disables
+// timeouts (and may have other effects for specific test types).
+const char switches::kTestLauncherInteractive[] = "test-launcher-interactive";
+
+// Number of parallel test launcher jobs.
+const char switches::kTestLauncherJobs[] = "test-launcher-jobs";
+
+// Path to list of compiled-in tests.
+const char switches::kTestLauncherListTests[] = "test-launcher-list-tests";
+
+// Path to test results file in our custom test launcher format.
+const char switches::kTestLauncherOutput[] = "test-launcher-output";
+
+// These two flags have the same effect, but don't use them at the same time.
+// isolated-script-test-launcher-retry-limit is preferred going forward.
+// Maximum number of times to retry a test after failure.
+const char switches::kTestLauncherRetryLimit[] = "test-launcher-retry-limit";
+const char switches::kIsolatedScriptTestLauncherRetryLimit[] =
+    "isolated-script-test-launcher-retry-limit";
+
+// Path to test results file with all the info from the test launcher.
+const char switches::kTestLauncherSummaryOutput[] =
+    "test-launcher-summary-output";
+
+// Causes the test launcher to print information about leaked files and/or
+// directories in child process's temporary directories.
+const char switches::kTestLauncherPrintTempLeaks[] = + "test-launcher-print-temp-leaks"; + +// Flag controlling when test stdio is displayed as part of the launcher's +// standard output. +const char switches::kTestLauncherPrintTestStdio[] = + "test-launcher-print-test-stdio"; + +// Print a writable path and exit (for internal use). +const char switches::kTestLauncherPrintWritablePath[] = + "test-launcher-print-writable-path"; + +// Index of the test shard to run, starting from 0 (first shard) to total shards +// minus one (last shard). +const char switches::kTestLauncherShardIndex[] = + "test-launcher-shard-index"; + +// Limit of test part results in the output. Default limit is 10. +// Negative value will completely disable limit. +const char switches::kTestLauncherTestPartResultsLimit[] = + "test-launcher-test-part-results-limit"; + +// Total number of shards. Must be the same for all shards. +const char switches::kTestLauncherTotalShards[] = + "test-launcher-total-shards"; + +// Time (in milliseconds) that the tests should wait before timing out. +const char switches::kTestLauncherTimeout[] = "test-launcher-timeout"; + +// Path where to save a trace of test launcher's execution. +const char switches::kTestLauncherTrace[] = "test-launcher-trace"; + +// TODO(phajdan.jr): Clean up the switch names. +const char switches::kTestTinyTimeout[] = "test-tiny-timeout"; +const char switches::kUiTestActionTimeout[] = "ui-test-action-timeout"; +const char switches::kUiTestActionMaxTimeout[] = "ui-test-action-max-timeout"; + +#if defined(OS_IOS) +// If enabled, runs unittests using the XCTest test runner. +const char switches::kEnableRunIOSUnittestsWithXCTest[] = + "enable-run-ios-unittests-with-xctest"; +#endif diff --git a/chromium/base/test/test_switches.h b/chromium/base/test/test_switches.h new file mode 100644 index 00000000000..9e2e627e407 --- /dev/null +++ b/chromium/base/test/test_switches.h @@ -0,0 +1,46 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_SWITCHES_H_ +#define BASE_TEST_TEST_SWITCHES_H_ + +#include "build/build_config.h" + +namespace switches { + +// All switches in alphabetical order. The switches should be documented +// alongside the definition of their values in the .cc file. 
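As the comment above says, each switch lives in two places: an extern declaration in this header and a documented definition in the .cc file. A short sketch of that pattern for a hypothetical new switch (the name is made up purely for illustration):

// In base/test/test_switches.h, inside namespace switches:
extern const char kTestLauncherExampleFlag[];

// In base/test/test_switches.cc, with its documentation directly above it:
// Describes what the flag does.
const char switches::kTestLauncherExampleFlag[] = "test-launcher-example-flag";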
+extern const char kHelpFlag[]; +extern const char kSingleProcessTests[]; +extern const char kTestLauncherBatchLimit[]; +extern const char kTestLauncherBotMode[]; +extern const char kTestLauncherDebugLauncher[]; +extern const char kTestLauncherForceRunBrokenTests[]; +extern const char kTestLauncherFilterFile[]; +extern const char kTestLauncherInteractive[]; +extern const char kTestLauncherJobs[]; +extern const char kTestLauncherListTests[]; +extern const char kTestLauncherOutput[]; +extern const char kTestLauncherRetryLimit[]; +extern const char kIsolatedScriptTestLauncherRetryLimit[]; +extern const char kTestLauncherSummaryOutput[]; +extern const char kTestLauncherPrintTempLeaks[]; +extern const char kTestLauncherPrintTestStdio[]; +extern const char kTestLauncherPrintWritablePath[]; +extern const char kTestLauncherShardIndex[]; +extern const char kTestLauncherTestPartResultsLimit[]; +extern const char kTestLauncherTotalShards[]; +extern const char kTestLauncherTimeout[]; +extern const char kTestLauncherTrace[]; +extern const char kTestTinyTimeout[]; +extern const char kUiTestActionTimeout[]; +extern const char kUiTestActionMaxTimeout[]; + +#if defined(OS_IOS) +extern const char kEnableRunIOSUnittestsWithXCTest[]; +#endif + +} // namespace switches + +#endif // BASE_TEST_TEST_SWITCHES_H_ diff --git a/chromium/base/test/test_timeouts.cc b/chromium/base/test/test_timeouts.cc new file mode 100644 index 00000000000..e77f17569c0 --- /dev/null +++ b/chromium/base/test/test_timeouts.cc @@ -0,0 +1,132 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_timeouts.h" + +#include <algorithm> +#include <string> + +#include "base/clang_profiling_buildflags.h" +#include "base/command_line.h" +#include "base/debug/debugger.h" +#include "base/logging.h" +#include "base/strings/string_number_conversions.h" +#include "base/test/test_switches.h" +#include "build/build_config.h" + +namespace { + +// Sets value to the greatest of: +// 1) value's current value multiplied by kTimeoutMultiplier (assuming +// InitializeTimeout is called only once per value). +// 2) min_value. +// 3) the numerical value given by switch_name on the command line multiplied +// by kTimeoutMultiplier. +void InitializeTimeout(const char* switch_name, int min_value, int* value) { + DCHECK(value); + int command_line_timeout = 0; + if (base::CommandLine::ForCurrentProcess()->HasSwitch(switch_name)) { + std::string string_value(base::CommandLine::ForCurrentProcess()-> + GetSwitchValueASCII(switch_name)); + if (!base::StringToInt(string_value, &command_line_timeout)) { + LOG(FATAL) << "Timeout value \"" << string_value << "\" was parsed as " + << command_line_timeout; + } + } + +#if defined(MEMORY_SANITIZER) + // ASan/TSan/MSan instrument each memory access. This may slow the execution + // down significantly. + // For MSan the slowdown depends heavily on the value of msan_track_origins + // build flag. The multiplier below corresponds to msan_track_origins = 1. +#if defined(OS_CHROMEOS) + // A handful of tests on ChromeOS run *very* close to the 6x limit used + // else where, so it's bumped to 7x. + constexpr int kTimeoutMultiplier = 7; +#else + constexpr int kTimeoutMultiplier = 6; +#endif +#elif defined(ADDRESS_SANITIZER) && defined(OS_WIN) + // ASan/Win has not been optimized yet, give it a higher + // timeout multiplier. 
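The per-sanitizer multipliers above feed a single expression in InitializeTimeout(): the larger of the current value and any command-line override, scaled by the multiplier and clamped from below by the minimum. A self-contained sketch of that arithmetic (the sample numbers are illustrative):

#include <algorithm>

// Mirrors max(max(current, command_line) * multiplier, minimum).
constexpr int EffectiveTimeoutMs(int current_ms,
                                 int command_line_ms,
                                 int multiplier,
                                 int minimum_ms) {
  return std::max(std::max(current_ms, command_line_ms) * multiplier,
                  minimum_ms);
}

// A 10000 ms action timeout under a 2x sanitizer multiplier, with no
// command-line override and no floor, becomes 20000 ms.
static_assert(EffectiveTimeoutMs(10000, 0, 2, 0) == 20000, "");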
See http://crbug.com/412471 + constexpr int kTimeoutMultiplier = 3; +#elif defined(ADDRESS_SANITIZER) && defined(OS_CHROMEOS) + // A number of tests on ChromeOS run very close to the 2x limit, so ChromeOS + // gets 3x. + constexpr int kTimeoutMultiplier = 3; +#elif defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) + constexpr int kTimeoutMultiplier = 2; +#elif BUILDFLAG(CLANG_PROFILING) + // On coverage build, tests run 3x slower. + constexpr int kTimeoutMultiplier = 3; +#elif !defined(NDEBUG) && defined(OS_CHROMEOS) + // TODO(crbug.com/1058022): reduce the multiplier back to 2x. + // A number of tests on ChromeOS run very close to the base limit, so ChromeOS + // gets 3x. + constexpr int kTimeoutMultiplier = 3; +#else + constexpr int kTimeoutMultiplier = 1; +#endif + + *value = std::max(std::max(*value, command_line_timeout) * kTimeoutMultiplier, + min_value); +} + +} // namespace + +// static +bool TestTimeouts::initialized_ = false; + +// The timeout values should increase in the order they appear in this block. +// static +int TestTimeouts::tiny_timeout_ms_ = 100; +int TestTimeouts::action_timeout_ms_ = 10000; +int TestTimeouts::action_max_timeout_ms_ = 30000; +int TestTimeouts::test_launcher_timeout_ms_ = 45000; + +// static +void TestTimeouts::Initialize() { + DCHECK(!initialized_); + initialized_ = true; + + const bool being_debugged = base::debug::BeingDebugged(); + if (being_debugged) { + fprintf(stdout, + "Detected presence of a debugger, running without test timeouts.\n"); + } + + // Note that these timeouts MUST be initialized in the correct order as + // per the CHECKS below. + + InitializeTimeout(switches::kTestTinyTimeout, 0, &tiny_timeout_ms_); + + // All timeouts other than the "tiny" one should be set to very large values + // when in a debugger or when run interactively, so that tests will not get + // auto-terminated. By setting the UI test action timeout to at least this + // value, we guarantee the subsequent timeouts will be this large also. + // Setting the "tiny" timeout to a large value as well would make some tests + // hang (because it's used as a task-posting delay). In particular this + // causes problems for some iOS device tests, which are always run inside a + // debugger (thus BeingDebugged() is true even on the bots). + int min_ui_test_action_timeout = tiny_timeout_ms_; + if (being_debugged || base::CommandLine::ForCurrentProcess()->HasSwitch( + switches::kTestLauncherInteractive)) { + constexpr int kVeryLargeTimeoutMs = 100'000'000; + min_ui_test_action_timeout = kVeryLargeTimeoutMs; + } + + InitializeTimeout(switches::kUiTestActionTimeout, min_ui_test_action_timeout, + &action_timeout_ms_); + InitializeTimeout(switches::kUiTestActionMaxTimeout, action_timeout_ms_, + &action_max_timeout_ms_); + + // Test launcher timeout is independent from anything above action timeout. + InitializeTimeout(switches::kTestLauncherTimeout, action_timeout_ms_, + &test_launcher_timeout_ms_); + + // The timeout values should be increasing in the right order. + CHECK_LE(tiny_timeout_ms_, action_timeout_ms_); + CHECK_LE(action_timeout_ms_, action_max_timeout_ms_); + CHECK_LE(action_timeout_ms_, test_launcher_timeout_ms_); +} diff --git a/chromium/base/test/test_timeouts.h b/chromium/base/test/test_timeouts.h new file mode 100644 index 00000000000..1bdda2a157b --- /dev/null +++ b/chromium/base/test/test_timeouts.h @@ -0,0 +1,63 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_TIMEOUTS_H_ +#define BASE_TEST_TEST_TIMEOUTS_H_ + +#include "base/logging.h" +#include "base/macros.h" +#include "base/time/time.h" + +// Provides common timeouts to use in tests. Makes it possible to adjust +// the timeouts for different environments (like TSan). +class TestTimeouts { + public: + // Initializes the timeouts. Not thread-safe. Should be called exactly once + // by the test suite. + static void Initialize(); + + // Timeout for actions that are expected to finish "almost instantly". This + // is used in various tests to post delayed tasks and usually functions more + // like a delay value than a timeout. + static base::TimeDelta tiny_timeout() { + DCHECK(initialized_); + return base::TimeDelta::FromMilliseconds(tiny_timeout_ms_); + } + + // Timeout to wait for something to happen. If you are not sure + // which timeout to use, this is the one you want. + static base::TimeDelta action_timeout() { + DCHECK(initialized_); + return base::TimeDelta::FromMilliseconds(action_timeout_ms_); + } + + // Timeout longer than the above, suitable for waiting on success conditions which + // can take a while to achieve but still should expire on failure before + // |test_launcher_timeout()| terminates the process. Note that + // test_launcher_timeout() can be reached nonetheless when multiple such + // actions are compounded in the same test. + static base::TimeDelta action_max_timeout() { + DCHECK(initialized_); + return base::TimeDelta::FromMilliseconds(action_max_timeout_ms_); + } + + // Timeout for a single test launched using the built-in test launcher. + // Do not use outside of the test launcher. + static base::TimeDelta test_launcher_timeout() { + DCHECK(initialized_); + return base::TimeDelta::FromMilliseconds(test_launcher_timeout_ms_); + } + + private: + static bool initialized_; + + static int tiny_timeout_ms_; + static int action_timeout_ms_; + static int action_max_timeout_ms_; + static int test_launcher_timeout_ms_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(TestTimeouts); +}; + +#endif // BASE_TEST_TEST_TIMEOUTS_H_ diff --git a/chromium/base/test/test_waitable_event.cc b/chromium/base/test/test_waitable_event.cc new file mode 100644 index 00000000000..09f5bb66e0e --- /dev/null +++ b/chromium/base/test/test_waitable_event.cc @@ -0,0 +1,28 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_waitable_event.h" + +#include <utility> + +namespace base { + +TestWaitableEvent::TestWaitableEvent(ResetPolicy reset_policy, + InitialState initial_state) + : WaitableEvent(reset_policy, initial_state) { + // Pretending this is only used while idle ensures this WaitableEvent is not + // instantiating a ScopedBlockingCallWithBaseSyncPrimitives in Wait(). In + // other words, test logic is considered "idle" work (not part of the tested + // logic). 
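The accessors above are intended to replace hard-coded waits in tests, so the effective values can scale with the environment. A hedged usage sketch (the event parameter and the expectation are illustrative):

#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
#include "testing/gtest/include/gtest/gtest.h"

// Waits for |done| to be signaled, but never longer than the environment-
// adjusted action timeout (larger under sanitizers or a debugger).
void ExpectSignaledWithinActionTimeout(base::WaitableEvent* done) {
  EXPECT_TRUE(done->TimedWait(TestTimeouts::action_timeout()));
}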
+ declare_only_used_while_idle(); +} + +#if defined(OS_WIN) +TestWaitableEvent::TestWaitableEvent(win::ScopedHandle event_handle) + : WaitableEvent(std::move(event_handle)) { + declare_only_used_while_idle(); +} +#endif + +} // namespace base diff --git a/chromium/base/test/test_waitable_event.h b/chromium/base/test/test_waitable_event.h new file mode 100644 index 00000000000..ff3bc21529d --- /dev/null +++ b/chromium/base/test/test_waitable_event.h @@ -0,0 +1,40 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_TEST_WAITABLE_EVENT_H_ +#define BASE_TEST_TEST_WAITABLE_EVENT_H_ + +#include "base/synchronization/waitable_event.h" +#include "build/build_config.h" + +#if defined(OS_WIN) +#include "base/win/scoped_handle.h" +#endif + +namespace base { + +// A WaitableEvent for use in tests. It has the same API as WaitableEvent, with +// the following two distinctions: +// 1) ScopedAllowBaseSyncPrimitivesForTesting is not required to block on it. +// 2) It doesn't instantiate a ScopedBlockingCallWithBaseSyncPrimitives in +// Wait() (important in some //base tests that are thrown off when the +// WaitableEvents used to drive the test add additional ScopedBlockingCalls +// to the mix of monitored calls). +class TestWaitableEvent : public WaitableEvent { + public: + TestWaitableEvent(ResetPolicy reset_policy = ResetPolicy::MANUAL, + InitialState initial_state = InitialState::NOT_SIGNALED); + +#if defined(OS_WIN) + explicit TestWaitableEvent(win::ScopedHandle event_handle); +#endif +}; + +static_assert(sizeof(TestWaitableEvent) == sizeof(WaitableEvent), + "WaitableEvent is non-virtual, TestWaitableEvent must be usable " + "interchangeably."); + +} // namespace base + +#endif // BASE_TEST_TEST_WAITABLE_EVENT_H_ diff --git a/chromium/base/test/test_waitable_event_unittest.cc b/chromium/base/test/test_waitable_event_unittest.cc new file mode 100644 index 00000000000..8e344655916 --- /dev/null +++ b/chromium/base/test/test_waitable_event_unittest.cc @@ -0,0 +1,66 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/test_waitable_event.h" + +#include "base/bind.h" +#include "base/test/task_environment.h" +#include "base/threading/scoped_blocking_call_internal.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +class NoInvokeBlockingObserver : public internal::BlockingObserver { + public: + void BlockingStarted(BlockingType blocking_type) override { ADD_FAILURE(); } + void BlockingTypeUpgraded() override { ADD_FAILURE(); } + void BlockingEnded() override { ADD_FAILURE(); } +}; + +TEST(TestWaitableEvent, NoBlockingCall) { + test::TaskEnvironment task_environment; + + NoInvokeBlockingObserver test_observer; + internal::SetBlockingObserverForCurrentThread(&test_observer); + + TestWaitableEvent test_waitable_event; + ThreadPool::PostTask( + FROM_HERE, {}, + BindOnce(&WaitableEvent::Signal, Unretained(&test_waitable_event))); + test_waitable_event.Wait(); + + internal::ClearBlockingObserverForCurrentThread(); +} + +TEST(TestWaitableEvent, WaitingInPoolDoesntRequireAllowance) { + test::TaskEnvironment task_environment; + + TestWaitableEvent test_waitable_event; + // MayBlock()/WithBaseSyncPrimitives()/ScopedAllowBaseSyncPrimitivesForTesting + // are not required to Wait() on a TestWaitableEvent. 
+ ThreadPool::PostTask( + FROM_HERE, {}, + BindOnce(&WaitableEvent::Wait, Unretained(&test_waitable_event))); + test_waitable_event.Signal(); + + task_environment.RunUntilIdle(); +} + +// Binding &WaitableEvent::Signal or &TestWaitableEvent::Signal is equivalent. +TEST(TestWaitableEvent, CanBindEitherType) { + test::TaskEnvironment task_environment; + TestWaitableEvent test_waitable_event(WaitableEvent::ResetPolicy::AUTOMATIC); + + ThreadPool::PostTask( + FROM_HERE, {}, + BindOnce(&WaitableEvent::Signal, Unretained(&test_waitable_event))); + test_waitable_event.Wait(); + + ThreadPool::PostTask( + FROM_HERE, {}, + BindOnce(&TestWaitableEvent::Signal, Unretained(&test_waitable_event))); + test_waitable_event.Wait(); +} + +} // namespace base diff --git a/chromium/base/test/thread_pool_test_helpers_android.cc b/chromium/base/test/thread_pool_test_helpers_android.cc new file mode 100644 index 00000000000..f2590a42c4f --- /dev/null +++ b/chromium/base/test/thread_pool_test_helpers_android.cc @@ -0,0 +1,39 @@ +// Copyright (c) 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/task/thread_pool/thread_pool_instance.h" +#include "base/test/test_support_jni_headers/ThreadPoolTestHelpers_jni.h" + +namespace base { + +// ThreadPoolTestHelpers is a friend of ThreadPoolInstance which grants access +// to SetCanRun(). +class ThreadPoolTestHelpers { + public: + // Enables/disables an execution fence that prevents tasks from running. + static void BeginFenceForTesting(); + static void EndFenceForTesting(); +}; + +// static +void ThreadPoolTestHelpers::BeginFenceForTesting() { + ThreadPoolInstance::Get()->BeginFence(); +} + +// static +void ThreadPoolTestHelpers::EndFenceForTesting() { + ThreadPoolInstance::Get()->EndFence(); +} + +} // namespace base + +void JNI_ThreadPoolTestHelpers_EnableThreadPoolExecutionForTesting( + JNIEnv* env) { + base::ThreadPoolTestHelpers::EndFenceForTesting(); +} + +void JNI_ThreadPoolTestHelpers_DisableThreadPoolExecutionForTesting( + JNIEnv* env) { + base::ThreadPoolTestHelpers::BeginFenceForTesting(); +}
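The JNI entry points above simply toggle the pool-wide execution fence. Restated as a hedged C++ sketch; in the tree the fence calls are reached through the friend helper above, so treat the direct calls below as illustrative:

#include "base/bind.h"
#include "base/location.h"
#include "base/task/thread_pool.h"
#include "base/task/thread_pool/thread_pool_instance.h"

// Tasks posted while the fence is up are queued but held back; they only
// start running once the fence is lifted again.
void PostWhileExecutionIsFenced() {
  base::ThreadPoolInstance::Get()->BeginFence();
  base::ThreadPool::PostTask(FROM_HERE,
                             base::BindOnce([] { /* deferred work */ }));
  base::ThreadPoolInstance::Get()->EndFence();
}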
\ No newline at end of file diff --git a/chromium/base/test/thread_test_helper.cc b/chromium/base/test/thread_test_helper.cc new file mode 100644 index 00000000000..03d6b2d6f3d --- /dev/null +++ b/chromium/base/test/thread_test_helper.cc @@ -0,0 +1,41 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/thread_test_helper.h" + +#include <utility> + +#include "base/bind.h" +#include "base/location.h" +#include "base/threading/thread_restrictions.h" + +namespace base { + +ThreadTestHelper::ThreadTestHelper( + scoped_refptr<SequencedTaskRunner> target_sequence) + : test_result_(false), + target_sequence_(std::move(target_sequence)), + done_event_(WaitableEvent::ResetPolicy::AUTOMATIC, + WaitableEvent::InitialState::NOT_SIGNALED) {} + +bool ThreadTestHelper::Run() { + if (!target_sequence_->PostTask( + FROM_HERE, base::BindOnce(&ThreadTestHelper::RunOnSequence, this))) { + return false; + } + base::ScopedAllowBaseSyncPrimitivesForTesting allow_wait; + done_event_.Wait(); + return test_result_; +} + +void ThreadTestHelper::RunTest() { set_test_result(true); } + +ThreadTestHelper::~ThreadTestHelper() = default; + +void ThreadTestHelper::RunOnSequence() { + RunTest(); + done_event_.Signal(); +} + +} // namespace base diff --git a/chromium/base/test/thread_test_helper.h b/chromium/base/test/thread_test_helper.h new file mode 100644 index 00000000000..935e7efc6b9 --- /dev/null +++ b/chromium/base/test/thread_test_helper.h @@ -0,0 +1,50 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_THREAD_TEST_HELPER_H_ +#define BASE_TEST_THREAD_TEST_HELPER_H_ + +#include "base/compiler_specific.h" +#include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/sequenced_task_runner.h" +#include "base/synchronization/waitable_event.h" + +namespace base { + +// Helper class that executes code on a given target sequence/thread while +// blocking on the invoking sequence/thread. To use, derive from this class and +// overwrite RunTest. An alternative use of this class is to use it directly. It +// will then block until all pending tasks on a given sequence/thread have been +// executed. +class ThreadTestHelper : public RefCountedThreadSafe<ThreadTestHelper> { + public: + explicit ThreadTestHelper(scoped_refptr<SequencedTaskRunner> target_sequence); + + // True if RunTest() was successfully executed on the target sequence. + bool Run() WARN_UNUSED_RESULT; + + virtual void RunTest(); + + protected: + friend class RefCountedThreadSafe<ThreadTestHelper>; + + virtual ~ThreadTestHelper(); + + // Use this method to store the result of RunTest(). + void set_test_result(bool test_result) { test_result_ = test_result; } + + private: + void RunOnSequence(); + + bool test_result_; + scoped_refptr<SequencedTaskRunner> target_sequence_; + WaitableEvent done_event_; + + DISALLOW_COPY_AND_ASSIGN(ThreadTestHelper); +}; + +} // namespace base + +#endif // BASE_TEST_THREAD_TEST_HELPER_H_ diff --git a/chromium/base/test/trace_event_analyzer.cc b/chromium/base/test/trace_event_analyzer.cc new file mode 100644 index 00000000000..78a6b9b4cae --- /dev/null +++ b/chromium/base/test/trace_event_analyzer.cc @@ -0,0 +1,1077 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
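ThreadTestHelper, defined just above, is usually subclassed so that a check can run on another sequence while the calling thread blocks on Run(). A hedged sketch (the derived class and what it verifies are illustrative):

#include <utility>

#include "base/memory/scoped_refptr.h"
#include "base/sequenced_task_runner.h"
#include "base/test/thread_test_helper.h"

// Runs a check on the target sequence and reports the result to the caller.
class StateCheckHelper : public base::ThreadTestHelper {
 public:
  using base::ThreadTestHelper::ThreadTestHelper;

  void RunTest() override {
    // Executed on the target sequence; record whatever was verified here.
    set_test_result(true);
  }
};

bool VerifyOnSequence(scoped_refptr<base::SequencedTaskRunner> task_runner) {
  auto helper = base::MakeRefCounted<StateCheckHelper>(std::move(task_runner));
  return helper->Run();  // blocks until RunTest() has run on |task_runner|
}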
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/trace_event_analyzer.h" + +#include <math.h> + +#include <algorithm> +#include <set> + +#include "base/bind.h" +#include "base/json/json_reader.h" +#include "base/memory/ptr_util.h" +#include "base/memory/ref_counted_memory.h" +#include "base/run_loop.h" +#include "base/strings/pattern.h" +#include "base/trace_event/trace_buffer.h" +#include "base/trace_event/trace_config.h" +#include "base/trace_event/trace_log.h" +#include "base/values.h" + +namespace { +void OnTraceDataCollected(base::OnceClosure quit_closure, + base::trace_event::TraceResultBuffer* buffer, + const scoped_refptr<base::RefCountedString>& json, + bool has_more_events) { + buffer->AddFragment(json->data()); + if (!has_more_events) + std::move(quit_closure).Run(); +} +} // namespace + +namespace trace_analyzer { + +// TraceEvent + +TraceEvent::TraceEvent() + : thread(0, 0), + timestamp(0), + duration(0), + phase(TRACE_EVENT_PHASE_BEGIN), + other_event(nullptr) {} + +TraceEvent::TraceEvent(TraceEvent&& other) = default; + +TraceEvent::~TraceEvent() = default; + +TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default; + +bool TraceEvent::SetFromJSON(const base::Value* event_value) { + if (event_value->type() != base::Value::Type::DICTIONARY) { + LOG(ERROR) << "Value must be Type::DICTIONARY"; + return false; + } + const base::DictionaryValue* dictionary = + static_cast<const base::DictionaryValue*>(event_value); + + std::string phase_str; + const base::DictionaryValue* args = nullptr; + + if (!dictionary->GetString("ph", &phase_str)) { + LOG(ERROR) << "ph is missing from TraceEvent JSON"; + return false; + } + + phase = *phase_str.data(); + + bool may_have_duration = (phase == TRACE_EVENT_PHASE_COMPLETE); + bool require_origin = (phase != TRACE_EVENT_PHASE_METADATA); + bool require_id = (phase == TRACE_EVENT_PHASE_ASYNC_BEGIN || + phase == TRACE_EVENT_PHASE_ASYNC_STEP_INTO || + phase == TRACE_EVENT_PHASE_ASYNC_STEP_PAST || + phase == TRACE_EVENT_PHASE_MEMORY_DUMP || + phase == TRACE_EVENT_PHASE_ENTER_CONTEXT || + phase == TRACE_EVENT_PHASE_LEAVE_CONTEXT || + phase == TRACE_EVENT_PHASE_CREATE_OBJECT || + phase == TRACE_EVENT_PHASE_DELETE_OBJECT || + phase == TRACE_EVENT_PHASE_SNAPSHOT_OBJECT || + phase == TRACE_EVENT_PHASE_ASYNC_END); + + if (require_origin && !dictionary->GetInteger("pid", &thread.process_id)) { + LOG(ERROR) << "pid is missing from TraceEvent JSON"; + return false; + } + if (require_origin && !dictionary->GetInteger("tid", &thread.thread_id)) { + LOG(ERROR) << "tid is missing from TraceEvent JSON"; + return false; + } + if (require_origin && !dictionary->GetDouble("ts", ×tamp)) { + LOG(ERROR) << "ts is missing from TraceEvent JSON"; + return false; + } + if (may_have_duration) { + dictionary->GetDouble("dur", &duration); + } + if (!dictionary->GetString("cat", &category)) { + LOG(ERROR) << "cat is missing from TraceEvent JSON"; + return false; + } + if (!dictionary->GetString("name", &name)) { + LOG(ERROR) << "name is missing from TraceEvent JSON"; + return false; + } + if (!dictionary->GetDictionary("args", &args)) { + std::string stripped_args; + // If argument filter is enabled, the arguments field contains a string + // value. 
+ if (!dictionary->GetString("args", &stripped_args) || + stripped_args != "__stripped__") { + LOG(ERROR) << "args is missing from TraceEvent JSON"; + return false; + } + } + if (require_id && !dictionary->GetString("id", &id) && + !dictionary->FindKey("id2")) { + LOG(ERROR) + << "id/id2 is missing from ASYNC_BEGIN/ASYNC_END TraceEvent JSON"; + return false; + } + + dictionary->GetDouble("tdur", &thread_duration); + dictionary->GetDouble("tts", &thread_timestamp); + dictionary->GetString("scope", &scope); + dictionary->GetString("bind_id", &bind_id); + dictionary->GetBoolean("flow_out", &flow_out); + dictionary->GetBoolean("flow_in", &flow_in); + + const base::DictionaryValue* id2; + if (dictionary->GetDictionary("id2", &id2)) { + id2->GetString("global", &global_id2); + id2->GetString("local", &local_id2); + } + + // For each argument, copy the type and create a trace_analyzer::TraceValue. + if (args) { + for (base::DictionaryValue::Iterator it(*args); !it.IsAtEnd(); + it.Advance()) { + std::string str; + bool boolean = false; + int int_num = 0; + double double_num = 0.0; + if (it.value().GetAsString(&str)) { + arg_strings[it.key()] = str; + } else if (it.value().GetAsInteger(&int_num)) { + arg_numbers[it.key()] = static_cast<double>(int_num); + } else if (it.value().GetAsBoolean(&boolean)) { + arg_numbers[it.key()] = static_cast<double>(boolean ? 1 : 0); + } else if (it.value().GetAsDouble(&double_num)) { + arg_numbers[it.key()] = double_num; + } + // Record all arguments as values. + arg_values[it.key()] = it.value().CreateDeepCopy(); + } + } + + return true; +} + +double TraceEvent::GetAbsTimeToOtherEvent() const { + return fabs(other_event->timestamp - timestamp); +} + +bool TraceEvent::GetArgAsString(const std::string& name, + std::string* arg) const { + const auto it = arg_strings.find(name); + if (it != arg_strings.end()) { + *arg = it->second; + return true; + } + return false; +} + +bool TraceEvent::GetArgAsNumber(const std::string& name, + double* arg) const { + const auto it = arg_numbers.find(name); + if (it != arg_numbers.end()) { + *arg = it->second; + return true; + } + return false; +} + +bool TraceEvent::GetArgAsValue(const std::string& name, + std::unique_ptr<base::Value>* arg) const { + const auto it = arg_values.find(name); + if (it != arg_values.end()) { + *arg = it->second->CreateDeepCopy(); + return true; + } + return false; +} + +bool TraceEvent::HasStringArg(const std::string& name) const { + return (arg_strings.find(name) != arg_strings.end()); +} + +bool TraceEvent::HasNumberArg(const std::string& name) const { + return (arg_numbers.find(name) != arg_numbers.end()); +} + +bool TraceEvent::HasArg(const std::string& name) const { + return (arg_values.find(name) != arg_values.end()); +} + +std::string TraceEvent::GetKnownArgAsString(const std::string& name) const { + std::string arg_string; + bool result = GetArgAsString(name, &arg_string); + DCHECK(result); + return arg_string; +} + +double TraceEvent::GetKnownArgAsDouble(const std::string& name) const { + double arg_double = 0; + bool result = GetArgAsNumber(name, &arg_double); + DCHECK(result); + return arg_double; +} + +int TraceEvent::GetKnownArgAsInt(const std::string& name) const { + double arg_double = 0; + bool result = GetArgAsNumber(name, &arg_double); + DCHECK(result); + return static_cast<int>(arg_double); +} + +bool TraceEvent::GetKnownArgAsBool(const std::string& name) const { + double arg_double = 0; + bool result = GetArgAsNumber(name, &arg_double); + DCHECK(result); + return (arg_double != 0.0); 
+} + +std::unique_ptr<base::Value> TraceEvent::GetKnownArgAsValue( + const std::string& name) const { + std::unique_ptr<base::Value> arg_value; + bool result = GetArgAsValue(name, &arg_value); + DCHECK(result); + return arg_value; +} + +// QueryNode + +QueryNode::QueryNode(const Query& query) : query_(query) { +} + +QueryNode::~QueryNode() = default; + +// Query + +Query::Query(TraceEventMember member) + : type_(QUERY_EVENT_MEMBER), + operator_(OP_INVALID), + member_(member), + number_(0), + is_pattern_(false) { +} + +Query::Query(TraceEventMember member, const std::string& arg_name) + : type_(QUERY_EVENT_MEMBER), + operator_(OP_INVALID), + member_(member), + number_(0), + string_(arg_name), + is_pattern_(false) { +} + +Query::Query(const Query& query) = default; + +Query::~Query() = default; + +Query Query::String(const std::string& str) { + return Query(str); +} + +Query Query::Double(double num) { + return Query(num); +} + +Query Query::Int(int32_t num) { + return Query(static_cast<double>(num)); +} + +Query Query::Uint(uint32_t num) { + return Query(static_cast<double>(num)); +} + +Query Query::Bool(bool boolean) { + return Query(boolean ? 1.0 : 0.0); +} + +Query Query::Phase(char phase) { + return Query(static_cast<double>(phase)); +} + +Query Query::Pattern(const std::string& pattern) { + Query query(pattern); + query.is_pattern_ = true; + return query; +} + +bool Query::Evaluate(const TraceEvent& event) const { + // First check for values that can convert to bool. + + // double is true if != 0: + double bool_value = 0.0; + bool is_bool = GetAsDouble(event, &bool_value); + if (is_bool) + return (bool_value != 0.0); + + // string is true if it is non-empty: + std::string str_value; + bool is_str = GetAsString(event, &str_value); + if (is_str) + return !str_value.empty(); + + DCHECK_EQ(QUERY_BOOLEAN_OPERATOR, type_) + << "Invalid query: missing boolean expression"; + DCHECK(left_.get()); + DCHECK(right_.get() || is_unary_operator()); + + if (is_comparison_operator()) { + DCHECK(left().is_value() && right().is_value()) + << "Invalid query: comparison operator used between event member and " + "value."; + bool compare_result = false; + if (CompareAsDouble(event, &compare_result)) + return compare_result; + if (CompareAsString(event, &compare_result)) + return compare_result; + return false; + } + // It's a logical operator. 
+ switch (operator_) { + case OP_AND: + return left().Evaluate(event) && right().Evaluate(event); + case OP_OR: + return left().Evaluate(event) || right().Evaluate(event); + case OP_NOT: + return !left().Evaluate(event); + default: + NOTREACHED(); + return false; + } +} + +bool Query::CompareAsDouble(const TraceEvent& event, bool* result) const { + double lhs, rhs; + if (!left().GetAsDouble(event, &lhs) || !right().GetAsDouble(event, &rhs)) + return false; + switch (operator_) { + case OP_EQ: + *result = (lhs == rhs); + return true; + case OP_NE: + *result = (lhs != rhs); + return true; + case OP_LT: + *result = (lhs < rhs); + return true; + case OP_LE: + *result = (lhs <= rhs); + return true; + case OP_GT: + *result = (lhs > rhs); + return true; + case OP_GE: + *result = (lhs >= rhs); + return true; + default: + NOTREACHED(); + return false; + } +} + +bool Query::CompareAsString(const TraceEvent& event, bool* result) const { + std::string lhs, rhs; + if (!left().GetAsString(event, &lhs) || !right().GetAsString(event, &rhs)) + return false; + switch (operator_) { + case OP_EQ: + if (right().is_pattern_) + *result = base::MatchPattern(lhs, rhs); + else if (left().is_pattern_) + *result = base::MatchPattern(rhs, lhs); + else + *result = (lhs == rhs); + return true; + case OP_NE: + if (right().is_pattern_) + *result = !base::MatchPattern(lhs, rhs); + else if (left().is_pattern_) + *result = !base::MatchPattern(rhs, lhs); + else + *result = (lhs != rhs); + return true; + case OP_LT: + *result = (lhs < rhs); + return true; + case OP_LE: + *result = (lhs <= rhs); + return true; + case OP_GT: + *result = (lhs > rhs); + return true; + case OP_GE: + *result = (lhs >= rhs); + return true; + default: + NOTREACHED(); + return false; + } +} + +bool Query::EvaluateArithmeticOperator(const TraceEvent& event, + double* num) const { + DCHECK_EQ(QUERY_ARITHMETIC_OPERATOR, type_); + DCHECK(left_.get()); + DCHECK(right_.get() || is_unary_operator()); + + double lhs = 0, rhs = 0; + if (!left().GetAsDouble(event, &lhs)) + return false; + if (!is_unary_operator() && !right().GetAsDouble(event, &rhs)) + return false; + + switch (operator_) { + case OP_ADD: + *num = lhs + rhs; + return true; + case OP_SUB: + *num = lhs - rhs; + return true; + case OP_MUL: + *num = lhs * rhs; + return true; + case OP_DIV: + *num = lhs / rhs; + return true; + case OP_MOD: + *num = static_cast<double>(static_cast<int64_t>(lhs) % + static_cast<int64_t>(rhs)); + return true; + case OP_NEGATE: + *num = -lhs; + return true; + default: + NOTREACHED(); + return false; + } +} + +bool Query::GetAsDouble(const TraceEvent& event, double* num) const { + switch (type_) { + case QUERY_ARITHMETIC_OPERATOR: + return EvaluateArithmeticOperator(event, num); + case QUERY_EVENT_MEMBER: + return GetMemberValueAsDouble(event, num); + case QUERY_NUMBER: + *num = number_; + return true; + default: + return false; + } +} + +bool Query::GetAsString(const TraceEvent& event, std::string* str) const { + switch (type_) { + case QUERY_EVENT_MEMBER: + return GetMemberValueAsString(event, str); + case QUERY_STRING: + *str = string_; + return true; + default: + return false; + } +} + +const TraceEvent* Query::SelectTargetEvent(const TraceEvent* event, + TraceEventMember member) { + if (member >= OTHER_FIRST_MEMBER && member <= OTHER_LAST_MEMBER) + return event->other_event; + if (member >= PREV_FIRST_MEMBER && member <= PREV_LAST_MEMBER) + return event->prev_event; + return event; +} + +bool Query::GetMemberValueAsDouble(const TraceEvent& event, + double* num) const 
{ + DCHECK_EQ(QUERY_EVENT_MEMBER, type_); + + // This could be a request for a member of |event| or a member of |event|'s + // associated previous or next event. Store the target event in the_event: + const TraceEvent* the_event = SelectTargetEvent(&event, member_); + + // Request for member of associated event, but there is no associated event. + if (!the_event) + return false; + + switch (member_) { + case EVENT_PID: + case OTHER_PID: + case PREV_PID: + *num = static_cast<double>(the_event->thread.process_id); + return true; + case EVENT_TID: + case OTHER_TID: + case PREV_TID: + *num = static_cast<double>(the_event->thread.thread_id); + return true; + case EVENT_TIME: + case OTHER_TIME: + case PREV_TIME: + *num = the_event->timestamp; + return true; + case EVENT_DURATION: + if (!the_event->has_other_event()) + return false; + *num = the_event->GetAbsTimeToOtherEvent(); + return true; + case EVENT_COMPLETE_DURATION: + if (the_event->phase != TRACE_EVENT_PHASE_COMPLETE) + return false; + *num = the_event->duration; + return true; + case EVENT_PHASE: + case OTHER_PHASE: + case PREV_PHASE: + *num = static_cast<double>(the_event->phase); + return true; + case EVENT_HAS_STRING_ARG: + case OTHER_HAS_STRING_ARG: + case PREV_HAS_STRING_ARG: + *num = (the_event->HasStringArg(string_) ? 1.0 : 0.0); + return true; + case EVENT_HAS_NUMBER_ARG: + case OTHER_HAS_NUMBER_ARG: + case PREV_HAS_NUMBER_ARG: + *num = (the_event->HasNumberArg(string_) ? 1.0 : 0.0); + return true; + case EVENT_ARG: + case OTHER_ARG: + case PREV_ARG: { + // Search for the argument name and return its value if found. + auto num_i = the_event->arg_numbers.find(string_); + if (num_i == the_event->arg_numbers.end()) + return false; + *num = num_i->second; + return true; + } + case EVENT_HAS_OTHER: + // return 1.0 (true) if the other event exists + *num = event.other_event ? 1.0 : 0.0; + return true; + case EVENT_HAS_PREV: + *num = event.prev_event ? 1.0 : 0.0; + return true; + default: + return false; + } +} + +bool Query::GetMemberValueAsString(const TraceEvent& event, + std::string* str) const { + DCHECK_EQ(QUERY_EVENT_MEMBER, type_); + + // This could be a request for a member of |event| or a member of |event|'s + // associated previous or next event. Store the target event in the_event: + const TraceEvent* the_event = SelectTargetEvent(&event, member_); + + // Request for member of associated event, but there is no associated event. + if (!the_event) + return false; + + switch (member_) { + case EVENT_CATEGORY: + case OTHER_CATEGORY: + case PREV_CATEGORY: + *str = the_event->category; + return true; + case EVENT_NAME: + case OTHER_NAME: + case PREV_NAME: + *str = the_event->name; + return true; + case EVENT_ID: + case OTHER_ID: + case PREV_ID: + *str = the_event->id; + return true; + case EVENT_ARG: + case OTHER_ARG: + case PREV_ARG: { + // Search for the argument name and return its value if found. 
+ auto str_i = the_event->arg_strings.find(string_); + if (str_i == the_event->arg_strings.end()) + return false; + *str = str_i->second; + return true; + } + default: + return false; + } +} + +Query::Query(const std::string& str) + : type_(QUERY_STRING), + operator_(OP_INVALID), + member_(EVENT_INVALID), + number_(0), + string_(str), + is_pattern_(false) { +} + +Query::Query(double num) + : type_(QUERY_NUMBER), + operator_(OP_INVALID), + member_(EVENT_INVALID), + number_(num), + is_pattern_(false) { +} +const Query& Query::left() const { + return left_->query(); +} + +const Query& Query::right() const { + return right_->query(); +} + +Query Query::operator==(const Query& rhs) const { + return Query(*this, rhs, OP_EQ); +} + +Query Query::operator!=(const Query& rhs) const { + return Query(*this, rhs, OP_NE); +} + +Query Query::operator<(const Query& rhs) const { + return Query(*this, rhs, OP_LT); +} + +Query Query::operator<=(const Query& rhs) const { + return Query(*this, rhs, OP_LE); +} + +Query Query::operator>(const Query& rhs) const { + return Query(*this, rhs, OP_GT); +} + +Query Query::operator>=(const Query& rhs) const { + return Query(*this, rhs, OP_GE); +} + +Query Query::operator&&(const Query& rhs) const { + return Query(*this, rhs, OP_AND); +} + +Query Query::operator||(const Query& rhs) const { + return Query(*this, rhs, OP_OR); +} + +Query Query::operator!() const { + return Query(*this, OP_NOT); +} + +Query Query::operator+(const Query& rhs) const { + return Query(*this, rhs, OP_ADD); +} + +Query Query::operator-(const Query& rhs) const { + return Query(*this, rhs, OP_SUB); +} + +Query Query::operator*(const Query& rhs) const { + return Query(*this, rhs, OP_MUL); +} + +Query Query::operator/(const Query& rhs) const { + return Query(*this, rhs, OP_DIV); +} + +Query Query::operator%(const Query& rhs) const { + return Query(*this, rhs, OP_MOD); +} + +Query Query::operator-() const { + return Query(*this, OP_NEGATE); +} + + +Query::Query(const Query& left, const Query& right, Operator binary_op) + : operator_(binary_op), + left_(new QueryNode(left)), + right_(new QueryNode(right)), + member_(EVENT_INVALID), + number_(0) { + type_ = (binary_op < OP_ADD ? + QUERY_BOOLEAN_OPERATOR : QUERY_ARITHMETIC_OPERATOR); +} + +Query::Query(const Query& left, Operator unary_op) + : operator_(unary_op), + left_(new QueryNode(left)), + member_(EVENT_INVALID), + number_(0) { + type_ = (unary_op < OP_ADD ? + QUERY_BOOLEAN_OPERATOR : QUERY_ARITHMETIC_OPERATOR); +} + +namespace { + +// Search |events| for |query| and add matches to |output|. 
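With the literal constructors and the operator overloads above, queries compose into small expression trees well before any event is inspected. A hedged sketch of a composite query, including the arithmetic operators that the usage examples in the header do not touch (event and argument names are illustrative):

#include "base/test/trace_event_analyzer.h"

using trace_analyzer::Query;

// Matches COMPLETE events named "Frame*" whose "frame_number" argument is
// even; evaluation happens later, against each TraceEvent in turn.
Query MakeEvenFrameQuery() {
  return Query::EventPhaseIs(TRACE_EVENT_PHASE_COMPLETE) &&
         Query::EventName() == Query::Pattern("Frame*") &&
         Query::EventHasNumberArg("frame_number") &&
         Query::EventArg("frame_number") % Query::Int(2) == Query::Int(0);
}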
+size_t FindMatchingEvents(const std::vector<TraceEvent>& events, + const Query& query, + TraceEventVector* output, + bool ignore_metadata_events) { + for (const auto& i : events) { + if (ignore_metadata_events && i.phase == TRACE_EVENT_PHASE_METADATA) + continue; + if (query.Evaluate(i)) + output->push_back(&i); + } + return output->size(); +} + +bool ParseEventsFromJson(const std::string& json, + std::vector<TraceEvent>* output) { + base::Optional<base::Value> root = base::JSONReader::Read(json); + + if (!root) + return false; + + base::Value::ListView list; + if (root->is_list()) { + list = root->GetList(); + } else if (root->is_dict()) { + base::Value* trace_events = root->FindListKey("traceEvents"); + if (!trace_events) + return false; + + list = trace_events->GetList(); + } else { + return false; + } + + for (const auto& item : list) { + TraceEvent event; + if (!event.SetFromJSON(&item)) + return false; + output->push_back(std::move(event)); + } + + return true; +} + +} // namespace + +// TraceAnalyzer + +TraceAnalyzer::TraceAnalyzer() + : ignore_metadata_events_(false), allow_association_changes_(true) {} + +TraceAnalyzer::~TraceAnalyzer() = default; + +// static +TraceAnalyzer* TraceAnalyzer::Create(const std::string& json_events) { + std::unique_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer()); + if (analyzer->SetEvents(json_events)) + return analyzer.release(); + return nullptr; +} + +bool TraceAnalyzer::SetEvents(const std::string& json_events) { + raw_events_.clear(); + if (!ParseEventsFromJson(json_events, &raw_events_)) + return false; + std::stable_sort(raw_events_.begin(), raw_events_.end()); + ParseMetadata(); + return true; +} + +void TraceAnalyzer::AssociateBeginEndEvents() { + using trace_analyzer::Query; + + Query begin(Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN)); + Query end(Query::EventPhaseIs(TRACE_EVENT_PHASE_END)); + Query match(Query::EventName() == Query::OtherName() && + Query::EventCategory() == Query::OtherCategory() && + Query::EventTid() == Query::OtherTid() && + Query::EventPid() == Query::OtherPid()); + + AssociateEvents(begin, end, match); +} + +void TraceAnalyzer::AssociateAsyncBeginEndEvents(bool match_pid) { + using trace_analyzer::Query; + + Query begin( + Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) || + Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_INTO) || + Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_PAST)); + Query end(Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_END) || + Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_INTO) || + Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_STEP_PAST)); + Query match(Query::EventCategory() == Query::OtherCategory() && + Query::EventId() == Query::OtherId()); + + if (match_pid) { + match = match && Query::EventPid() == Query::OtherPid(); + } + + AssociateEvents(begin, end, match); +} + +void TraceAnalyzer::AssociateEvents(const Query& first, + const Query& second, + const Query& match) { + DCHECK(allow_association_changes_) + << "AssociateEvents not allowed after FindEvents"; + + // Search for matching begin/end event pairs. When a matching end is found, + // it is associated with the begin event. + std::vector<TraceEvent*> begin_stack; + for (auto& this_event : raw_events_) { + if (second.Evaluate(this_event)) { + // Search stack for matching begin, starting from end. + for (int stack_index = static_cast<int>(begin_stack.size()) - 1; + stack_index >= 0; --stack_index) { + TraceEvent& begin_event = *begin_stack[stack_index]; + + // Temporarily set other to test against the match query. 
+ const TraceEvent* other_backup = begin_event.other_event; + begin_event.other_event = &this_event; + if (match.Evaluate(begin_event)) { + // Found a matching begin/end pair. + // Set the associated previous event + this_event.prev_event = &begin_event; + // Erase the matching begin event index from the stack. + begin_stack.erase(begin_stack.begin() + stack_index); + break; + } + + // Not a match, restore original other and continue. + begin_event.other_event = other_backup; + } + } + // Even if this_event is a |second| event that has matched an earlier + // |first| event, it can still also be a |first| event and be associated + // with a later |second| event. + if (first.Evaluate(this_event)) { + begin_stack.push_back(&this_event); + } + } +} + +void TraceAnalyzer::MergeAssociatedEventArgs() { + for (auto& i : raw_events_) { + // Merge all associated events with the first event. + const TraceEvent* other = i.other_event; + // Avoid looping by keeping set of encountered TraceEvents. + std::set<const TraceEvent*> encounters; + encounters.insert(&i); + while (other && encounters.find(other) == encounters.end()) { + encounters.insert(other); + i.arg_numbers.insert(other->arg_numbers.begin(), + other->arg_numbers.end()); + i.arg_strings.insert(other->arg_strings.begin(), + other->arg_strings.end()); + other = other->other_event; + } + } +} + +size_t TraceAnalyzer::FindEvents(const Query& query, TraceEventVector* output) { + allow_association_changes_ = false; + output->clear(); + return FindMatchingEvents( + raw_events_, query, output, ignore_metadata_events_); +} + +const TraceEvent* TraceAnalyzer::FindFirstOf(const Query& query) { + TraceEventVector output; + if (FindEvents(query, &output) > 0) + return output.front(); + return nullptr; +} + +const TraceEvent* TraceAnalyzer::FindLastOf(const Query& query) { + TraceEventVector output; + if (FindEvents(query, &output) > 0) + return output.back(); + return nullptr; +} + +const std::string& TraceAnalyzer::GetThreadName( + const TraceEvent::ProcessThreadID& thread) { + // If thread is not found, just add and return empty string. + return thread_names_[thread]; +} + +void TraceAnalyzer::ParseMetadata() { + for (const auto& this_event : raw_events_) { + // Check for thread name metadata. + if (this_event.phase != TRACE_EVENT_PHASE_METADATA || + this_event.name != "thread_name") + continue; + std::map<std::string, std::string>::const_iterator string_it = + this_event.arg_strings.find("name"); + if (string_it != this_event.arg_strings.end()) + thread_names_[this_event.thread] = string_it->second; + } +} + +// Utility functions for collecting process-local traces and creating a +// |TraceAnalyzer| from the result. 
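Putting the pieces together with the TraceEventVector statistics helpers defined further below: a hedged end-to-end sketch that filters events out of an analyzer and logs their timing statistics (the event name is illustrative):

#include "base/logging.h"
#include "base/test/trace_event_analyzer.h"

using trace_analyzer::Query;
using trace_analyzer::TraceAnalyzer;
using trace_analyzer::TraceEventVector;

// Pulls all "BeginFrame" events out of |analyzer| and logs interval stats,
// provided there are enough events for GetRateStats() to work with.
void LogFrameRateStats(TraceAnalyzer* analyzer) {
  TraceEventVector frame_events;
  analyzer->FindEvents(Query::EventNameIs("BeginFrame"), &frame_events);

  trace_analyzer::RateStats stats;
  if (trace_analyzer::GetRateStats(frame_events, &stats, /*options=*/nullptr)) {
    LOG(INFO) << "mean interval (us): " << stats.mean_us
              << ", stddev (us): " << stats.standard_deviation_us;
  }
}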
+ +void Start(const std::string& category_filter_string) { + DCHECK(!base::trace_event::TraceLog::GetInstance()->IsEnabled()); + base::trace_event::TraceLog::GetInstance()->SetEnabled( + base::trace_event::TraceConfig(category_filter_string, ""), + base::trace_event::TraceLog::RECORDING_MODE); +} + +std::unique_ptr<TraceAnalyzer> Stop() { + DCHECK(base::trace_event::TraceLog::GetInstance()->IsEnabled()); + base::trace_event::TraceLog::GetInstance()->SetDisabled(); + + base::trace_event::TraceResultBuffer buffer; + base::trace_event::TraceResultBuffer::SimpleOutput trace_output; + buffer.SetOutputCallback(trace_output.GetCallback()); + base::RunLoop run_loop; + buffer.Start(); + base::trace_event::TraceLog::GetInstance()->Flush( + base::BindRepeating(&OnTraceDataCollected, run_loop.QuitClosure(), + base::Unretained(&buffer))); + run_loop.Run(); + buffer.Finish(); + + return base::WrapUnique(TraceAnalyzer::Create(trace_output.json_output)); +} + +// TraceEventVector utility functions. + +bool GetRateStats(const TraceEventVector& events, + RateStats* stats, + const RateStatsOptions* options) { + DCHECK(stats); + // Need at least 3 events to calculate rate stats. + const size_t kMinEvents = 3; + if (events.size() < kMinEvents) { + LOG(ERROR) << "Not enough events: " << events.size(); + return false; + } + + std::vector<double> deltas; + size_t num_deltas = events.size() - 1; + for (size_t i = 0; i < num_deltas; ++i) { + double delta = events.at(i + 1)->timestamp - events.at(i)->timestamp; + if (delta < 0.0) { + LOG(ERROR) << "Events are out of order"; + return false; + } + deltas.push_back(delta); + } + + std::sort(deltas.begin(), deltas.end()); + + if (options) { + if (options->trim_min + options->trim_max > events.size() - kMinEvents) { + LOG(ERROR) << "Attempt to trim too many events"; + return false; + } + deltas.erase(deltas.begin(), deltas.begin() + options->trim_min); + deltas.erase(deltas.end() - options->trim_max, deltas.end()); + } + + num_deltas = deltas.size(); + double delta_sum = 0.0; + for (size_t i = 0; i < num_deltas; ++i) + delta_sum += deltas[i]; + + stats->min_us = *std::min_element(deltas.begin(), deltas.end()); + stats->max_us = *std::max_element(deltas.begin(), deltas.end()); + stats->mean_us = delta_sum / static_cast<double>(num_deltas); + + double sum_mean_offsets_squared = 0.0; + for (size_t i = 0; i < num_deltas; ++i) { + double offset = fabs(deltas[i] - stats->mean_us); + sum_mean_offsets_squared += offset * offset; + } + stats->standard_deviation_us = + sqrt(sum_mean_offsets_squared / static_cast<double>(num_deltas - 1)); + + return true; +} + +bool FindFirstOf(const TraceEventVector& events, + const Query& query, + size_t position, + size_t* return_index) { + DCHECK(return_index); + for (size_t i = position; i < events.size(); ++i) { + if (query.Evaluate(*events[i])) { + *return_index = i; + return true; + } + } + return false; +} + +bool FindLastOf(const TraceEventVector& events, + const Query& query, + size_t position, + size_t* return_index) { + DCHECK(return_index); + for (size_t i = std::min(position + 1, events.size()); i != 0; --i) { + if (query.Evaluate(*events[i - 1])) { + *return_index = i - 1; + return true; + } + } + return false; +} + +bool FindClosest(const TraceEventVector& events, + const Query& query, + size_t position, + size_t* return_closest, + size_t* return_second_closest) { + DCHECK(return_closest); + if (events.empty() || position >= events.size()) + return false; + size_t closest = events.size(); + size_t second_closest = events.size(); + 
for (size_t i = 0; i < events.size(); ++i) { + if (!query.Evaluate(*events.at(i))) + continue; + if (closest == events.size()) { + closest = i; + continue; + } + if (fabs(events.at(i)->timestamp - events.at(position)->timestamp) < + fabs(events.at(closest)->timestamp - events.at(position)->timestamp)) { + second_closest = closest; + closest = i; + } else if (second_closest == events.size()) { + second_closest = i; + } + } + + if (closest < events.size() && + (!return_second_closest || second_closest < events.size())) { + *return_closest = closest; + if (return_second_closest) + *return_second_closest = second_closest; + return true; + } + + return false; +} + +size_t CountMatches(const TraceEventVector& events, + const Query& query, + size_t begin_position, + size_t end_position) { + if (begin_position >= events.size()) + return 0u; + end_position = (end_position < events.size()) ? end_position : events.size(); + size_t count = 0u; + for (size_t i = begin_position; i < end_position; ++i) { + if (query.Evaluate(*events.at(i))) + ++count; + } + return count; +} + +} // namespace trace_analyzer diff --git a/chromium/base/test/trace_event_analyzer.h b/chromium/base/test/trace_event_analyzer.h new file mode 100644 index 00000000000..dcdd2e4b5ec --- /dev/null +++ b/chromium/base/test/trace_event_analyzer.h @@ -0,0 +1,842 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Use trace_analyzer::Query and trace_analyzer::TraceAnalyzer to search for +// specific trace events that were generated by the trace_event.h API. +// +// Basic procedure: +// - Get trace events JSON string from base::trace_event::TraceLog. +// - Create TraceAnalyzer with JSON string. +// - Call TraceAnalyzer::AssociateBeginEndEvents (optional). +// - Call TraceAnalyzer::AssociateEvents (zero or more times). +// - Call TraceAnalyzer::FindEvents with queries to find specific events. +// +// A Query is a boolean expression tree that evaluates to true or false for a +// given trace event. Queries can be combined into a tree using boolean, +// arithmetic and comparison operators that refer to data of an individual trace +// event. +// +// The events are returned as trace_analyzer::TraceEvent objects. +// TraceEvent contains a single trace event's data, as well as a pointer to +// a related trace event. The related trace event is typically the matching end +// of a begin event or the matching begin of an end event. +// +// The following examples use this basic setup code to construct TraceAnalyzer +// with the json trace string retrieved from TraceLog and construct an event +// vector for retrieving events: +// +// TraceAnalyzer analyzer(json_events); +// TraceEventVector events; +// +// EXAMPLE 1: Find events named "my_event". +// +// analyzer.FindEvents(Query(EVENT_NAME) == "my_event", &events); +// +// EXAMPLE 2: Find begin events named "my_event" with duration > 1 second. +// +// Query q = (Query(EVENT_NAME) == Query::String("my_event") && +// Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_BEGIN) && +// Query(EVENT_DURATION) > Query::Double(1000000.0)); +// analyzer.FindEvents(q, &events); +// +// EXAMPLE 3: Associating event pairs across threads. +// +// If the test needs to analyze something that starts and ends on different +// threads, the test needs to use INSTANT events. 
The typical procedure is to +// specify the same unique ID as a TRACE_EVENT argument on both the start and +// finish INSTANT events. Then use the following procedure to associate those +// events. +// +// Step 1: instrument code with custom begin/end trace events. +// [Thread 1 tracing code] +// TRACE_EVENT_INSTANT1("test_latency", "timing1_begin", "id", 3); +// [Thread 2 tracing code] +// TRACE_EVENT_INSTANT1("test_latency", "timing1_end", "id", 3); +// +// Step 2: associate these custom begin/end pairs. +// Query begin(Query(EVENT_NAME) == Query::String("timing1_begin")); +// Query end(Query(EVENT_NAME) == Query::String("timing1_end")); +// Query match(Query(EVENT_ARG, "id") == Query(OTHER_ARG, "id")); +// analyzer.AssociateEvents(begin, end, match); +// +// Step 3: search for "timing1_begin" events with existing other event. +// Query q = (Query(EVENT_NAME) == Query::String("timing1_begin") && +// Query(EVENT_HAS_OTHER)); +// analyzer.FindEvents(q, &events); +// +// Step 4: analyze events, such as checking durations. +// for (size_t i = 0; i < events.size(); ++i) { +// double duration; +// EXPECT_TRUE(events[i].GetAbsTimeToOtherEvent(&duration)); +// EXPECT_LT(duration, 1000000.0/60.0); // expect less than 1/60 second. +// } +// +// There are two helper functions, Start(category_filter_string) and Stop(), for +// facilitating the collection of process-local traces and building a +// TraceAnalyzer from them. A typical test, that uses the helper functions, +// looks like the following: +// +// TEST_F(...) { +// Start("*"); +// [Invoke the functions you want to test their traces] +// auto analyzer = Stop(); +// +// [Use the analyzer to verify produced traces, as explained above] +// } +// +// Note: The Stop() function needs a SingleThreadTaskRunner. + +#ifndef BASE_TEST_TRACE_EVENT_ANALYZER_H_ +#define BASE_TEST_TRACE_EVENT_ANALYZER_H_ + +#include <stddef.h> +#include <stdint.h> + +#include <map> +#include <memory> +#include <string> +#include <vector> + +#include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/trace_event/trace_event.h" + +namespace base { +class Value; +} + +namespace trace_analyzer { +class QueryNode; + +// trace_analyzer::TraceEvent is a more convenient form of the +// base::trace_event::TraceEvent class to make tracing-based tests easier to +// write. +struct TraceEvent { + // ProcessThreadID contains a Process ID and Thread ID. + struct ProcessThreadID { + ProcessThreadID() : process_id(0), thread_id(0) {} + ProcessThreadID(int process_id, int thread_id) + : process_id(process_id), thread_id(thread_id) {} + bool operator< (const ProcessThreadID& rhs) const { + if (process_id != rhs.process_id) + return process_id < rhs.process_id; + return thread_id < rhs.thread_id; + } + int process_id; + int thread_id; + }; + + TraceEvent(); + TraceEvent(TraceEvent&& other); + ~TraceEvent(); + + bool SetFromJSON(const base::Value* event_value) WARN_UNUSED_RESULT; + + bool operator< (const TraceEvent& rhs) const { + return timestamp < rhs.timestamp; + } + + TraceEvent& operator=(TraceEvent&& rhs); + + bool has_other_event() const { return other_event; } + + // Returns absolute duration in microseconds between this event and other + // event. Must have already verified that other_event exists by + // Query(EVENT_HAS_OTHER) or by calling has_other_event(). + double GetAbsTimeToOtherEvent() const; + + // Return the argument value if it exists and it is a string. 
+ bool GetArgAsString(const std::string& name, std::string* arg) const; + // Return the argument value if it exists and it is a number. + bool GetArgAsNumber(const std::string& name, double* arg) const; + // Return the argument value if it exists. + bool GetArgAsValue(const std::string& name, + std::unique_ptr<base::Value>* arg) const; + + // Check if argument exists and is string. + bool HasStringArg(const std::string& name) const; + // Check if argument exists and is number (double, int or bool). + bool HasNumberArg(const std::string& name) const; + // Check if argument exists. + bool HasArg(const std::string& name) const; + + // Get known existing arguments as specific types. + // Useful when you have already queried the argument with + // Query(HAS_NUMBER_ARG) or Query(HAS_STRING_ARG). + std::string GetKnownArgAsString(const std::string& name) const; + double GetKnownArgAsDouble(const std::string& name) const; + int GetKnownArgAsInt(const std::string& name) const; + bool GetKnownArgAsBool(const std::string& name) const; + std::unique_ptr<base::Value> GetKnownArgAsValue( + const std::string& name) const; + + // Process ID and Thread ID. + ProcessThreadID thread; + + // Time since epoch in microseconds. + // Stored as double to match its JSON representation. + double timestamp; + double duration; + char phase; + std::string category; + std::string name; + std::string id; + double thread_duration = 0.0; + double thread_timestamp = 0.0; + std::string scope; + std::string bind_id; + bool flow_out = false; + bool flow_in = false; + std::string global_id2; + std::string local_id2; + + // All numbers and bool values from TraceEvent args are cast to double. + // bool becomes 1.0 (true) or 0.0 (false). + std::map<std::string, double> arg_numbers; + std::map<std::string, std::string> arg_strings; + std::map<std::string, std::unique_ptr<base::Value>> arg_values; + + // The other event associated with this event (or NULL). + const TraceEvent* other_event; + + // A back-link for |other_event|. That is, if other_event is not null, then + // |event->other_event->prev_event == event| is always true. + const TraceEvent* prev_event; +}; + +typedef std::vector<const TraceEvent*> TraceEventVector; + +class Query { + public: + Query(const Query& query); + + ~Query(); + + //////////////////////////////////////////////////////////////// + // Query literal values + + // Compare with the given string. + static Query String(const std::string& str); + + // Compare with the given number. + static Query Double(double num); + static Query Int(int32_t num); + static Query Uint(uint32_t num); + + // Compare with the given bool. + static Query Bool(bool boolean); + + // Compare with the given phase. + static Query Phase(char phase); + + // Compare with the given string pattern. Only works with == and != operators. + // Example: Query(EVENT_NAME) == Query::Pattern("MyEvent*") + static Query Pattern(const std::string& pattern); + + //////////////////////////////////////////////////////////////// + // Query event members + + static Query EventPid() { return Query(EVENT_PID); } + + static Query EventTid() { return Query(EVENT_TID); } + + // Return the timestamp of the event in microseconds since epoch. + static Query EventTime() { return Query(EVENT_TIME); } + + // Return the absolute time between event and other event in microseconds. + // Only works if Query::EventHasOther() == true. + static Query EventDuration() { return Query(EVENT_DURATION); } + + // Return the duration of a COMPLETE event. 
+ static Query EventCompleteDuration() { + return Query(EVENT_COMPLETE_DURATION); + } + + static Query EventPhase() { return Query(EVENT_PHASE); } + + static Query EventCategory() { return Query(EVENT_CATEGORY); } + + static Query EventName() { return Query(EVENT_NAME); } + + static Query EventId() { return Query(EVENT_ID); } + + static Query EventPidIs(int process_id) { + return Query(EVENT_PID) == Query::Int(process_id); + } + + static Query EventTidIs(int thread_id) { + return Query(EVENT_TID) == Query::Int(thread_id); + } + + static Query EventThreadIs(const TraceEvent::ProcessThreadID& thread) { + return EventPidIs(thread.process_id) && EventTidIs(thread.thread_id); + } + + static Query EventTimeIs(double timestamp) { + return Query(EVENT_TIME) == Query::Double(timestamp); + } + + static Query EventDurationIs(double duration) { + return Query(EVENT_DURATION) == Query::Double(duration); + } + + static Query EventPhaseIs(char phase) { + return Query(EVENT_PHASE) == Query::Phase(phase); + } + + static Query EventCategoryIs(const std::string& category) { + return Query(EVENT_CATEGORY) == Query::String(category); + } + + static Query EventNameIs(const std::string& name) { + return Query(EVENT_NAME) == Query::String(name); + } + + static Query EventIdIs(const std::string& id) { + return Query(EVENT_ID) == Query::String(id); + } + + // Evaluates to true if arg exists and is a string. + static Query EventHasStringArg(const std::string& arg_name) { + return Query(EVENT_HAS_STRING_ARG, arg_name); + } + + // Evaluates to true if arg exists and is a number. + // Number arguments include types double, int and bool. + static Query EventHasNumberArg(const std::string& arg_name) { + return Query(EVENT_HAS_NUMBER_ARG, arg_name); + } + + // Evaluates to arg value (string or number). + static Query EventArg(const std::string& arg_name) { + return Query(EVENT_ARG, arg_name); + } + + // Return true if associated event exists. + static Query EventHasOther() { return Query(EVENT_HAS_OTHER); } + + // Access the associated other_event's members: + + static Query OtherPid() { return Query(OTHER_PID); } + + static Query OtherTid() { return Query(OTHER_TID); } + + static Query OtherTime() { return Query(OTHER_TIME); } + + static Query OtherPhase() { return Query(OTHER_PHASE); } + + static Query OtherCategory() { return Query(OTHER_CATEGORY); } + + static Query OtherName() { return Query(OTHER_NAME); } + + static Query OtherId() { return Query(OTHER_ID); } + + static Query OtherPidIs(int process_id) { + return Query(OTHER_PID) == Query::Int(process_id); + } + + static Query OtherTidIs(int thread_id) { + return Query(OTHER_TID) == Query::Int(thread_id); + } + + static Query OtherThreadIs(const TraceEvent::ProcessThreadID& thread) { + return OtherPidIs(thread.process_id) && OtherTidIs(thread.thread_id); + } + + static Query OtherTimeIs(double timestamp) { + return Query(OTHER_TIME) == Query::Double(timestamp); + } + + static Query OtherPhaseIs(char phase) { + return Query(OTHER_PHASE) == Query::Phase(phase); + } + + static Query OtherCategoryIs(const std::string& category) { + return Query(OTHER_CATEGORY) == Query::String(category); + } + + static Query OtherNameIs(const std::string& name) { + return Query(OTHER_NAME) == Query::String(name); + } + + static Query OtherIdIs(const std::string& id) { + return Query(OTHER_ID) == Query::String(id); + } + + // Evaluates to true if arg exists and is a string. 
+ static Query OtherHasStringArg(const std::string& arg_name) { + return Query(OTHER_HAS_STRING_ARG, arg_name); + } + + // Evaluates to true if arg exists and is a number. + // Number arguments include types double, int and bool. + static Query OtherHasNumberArg(const std::string& arg_name) { + return Query(OTHER_HAS_NUMBER_ARG, arg_name); + } + + // Evaluates to arg value (string or number). + static Query OtherArg(const std::string& arg_name) { + return Query(OTHER_ARG, arg_name); + } + + // Access the associated prev_event's members: + + static Query PrevPid() { return Query(PREV_PID); } + + static Query PrevTid() { return Query(PREV_TID); } + + static Query PrevTime() { return Query(PREV_TIME); } + + static Query PrevPhase() { return Query(PREV_PHASE); } + + static Query PrevCategory() { return Query(PREV_CATEGORY); } + + static Query PrevName() { return Query(PREV_NAME); } + + static Query PrevId() { return Query(PREV_ID); } + + static Query PrevPidIs(int process_id) { + return Query(PREV_PID) == Query::Int(process_id); + } + + static Query PrevTidIs(int thread_id) { + return Query(PREV_TID) == Query::Int(thread_id); + } + + static Query PrevThreadIs(const TraceEvent::ProcessThreadID& thread) { + return PrevPidIs(thread.process_id) && PrevTidIs(thread.thread_id); + } + + static Query PrevTimeIs(double timestamp) { + return Query(PREV_TIME) == Query::Double(timestamp); + } + + static Query PrevPhaseIs(char phase) { + return Query(PREV_PHASE) == Query::Phase(phase); + } + + static Query PrevCategoryIs(const std::string& category) { + return Query(PREV_CATEGORY) == Query::String(category); + } + + static Query PrevNameIs(const std::string& name) { + return Query(PREV_NAME) == Query::String(name); + } + + static Query PrevIdIs(const std::string& id) { + return Query(PREV_ID) == Query::String(id); + } + + // Evaluates to true if arg exists and is a string. + static Query PrevHasStringArg(const std::string& arg_name) { + return Query(PREV_HAS_STRING_ARG, arg_name); + } + + // Evaluates to true if arg exists and is a number. + // Number arguments include types double, int and bool. + static Query PrevHasNumberArg(const std::string& arg_name) { + return Query(PREV_HAS_NUMBER_ARG, arg_name); + } + + // Evaluates to arg value (string or number). + static Query PrevArg(const std::string& arg_name) { + return Query(PREV_ARG, arg_name); + } + + //////////////////////////////////////////////////////////////// + // Common queries: + + // Find BEGIN events that have a corresponding END event. + static Query MatchBeginWithEnd() { + return (Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_BEGIN)) && + Query(EVENT_HAS_OTHER); + } + + // Find COMPLETE events. + static Query MatchComplete() { + return (Query(EVENT_PHASE) == Query::Phase(TRACE_EVENT_PHASE_COMPLETE)); + } + + // Find ASYNC_BEGIN events that have a corresponding ASYNC_END event. + static Query MatchAsyncBeginWithNext() { + return (Query(EVENT_PHASE) == + Query::Phase(TRACE_EVENT_PHASE_ASYNC_BEGIN)) && + Query(EVENT_HAS_OTHER); + } + + // Find BEGIN events of given |name| which also have associated END events. + static Query MatchBeginName(const std::string& name) { + return (Query(EVENT_NAME) == Query(name)) && MatchBeginWithEnd(); + } + + // Find COMPLETE events of given |name|. + static Query MatchCompleteName(const std::string& name) { + return (Query(EVENT_NAME) == Query(name)) && MatchComplete(); + } + + // Match given Process ID and Thread ID. 
+ static Query MatchThread(const TraceEvent::ProcessThreadID& thread) { + return (Query(EVENT_PID) == Query::Int(thread.process_id)) && + (Query(EVENT_TID) == Query::Int(thread.thread_id)); + } + + // Match event pair that spans multiple threads. + static Query MatchCrossThread() { + return (Query(EVENT_PID) != Query(OTHER_PID)) || + (Query(EVENT_TID) != Query(OTHER_TID)); + } + + //////////////////////////////////////////////////////////////// + // Operators: + + // Boolean operators: + Query operator==(const Query& rhs) const; + Query operator!=(const Query& rhs) const; + Query operator< (const Query& rhs) const; + Query operator<=(const Query& rhs) const; + Query operator> (const Query& rhs) const; + Query operator>=(const Query& rhs) const; + Query operator&&(const Query& rhs) const; + Query operator||(const Query& rhs) const; + Query operator!() const; + + // Arithmetic operators: + // Following operators are applied to double arguments: + Query operator+(const Query& rhs) const; + Query operator-(const Query& rhs) const; + Query operator*(const Query& rhs) const; + Query operator/(const Query& rhs) const; + Query operator-() const; + // Mod operates on int64_t args (doubles are casted to int64_t beforehand): + Query operator%(const Query& rhs) const; + + // Return true if the given event matches this query tree. + // This is a recursive method that walks the query tree. + bool Evaluate(const TraceEvent& event) const; + + enum TraceEventMember { + EVENT_INVALID, + EVENT_PID, + EVENT_TID, + EVENT_TIME, + EVENT_DURATION, + EVENT_COMPLETE_DURATION, + EVENT_PHASE, + EVENT_CATEGORY, + EVENT_NAME, + EVENT_ID, + EVENT_HAS_STRING_ARG, + EVENT_HAS_NUMBER_ARG, + EVENT_ARG, + EVENT_HAS_OTHER, + EVENT_HAS_PREV, + + OTHER_PID, + OTHER_TID, + OTHER_TIME, + OTHER_PHASE, + OTHER_CATEGORY, + OTHER_NAME, + OTHER_ID, + OTHER_HAS_STRING_ARG, + OTHER_HAS_NUMBER_ARG, + OTHER_ARG, + + PREV_PID, + PREV_TID, + PREV_TIME, + PREV_PHASE, + PREV_CATEGORY, + PREV_NAME, + PREV_ID, + PREV_HAS_STRING_ARG, + PREV_HAS_NUMBER_ARG, + PREV_ARG, + + OTHER_FIRST_MEMBER = OTHER_PID, + OTHER_LAST_MEMBER = OTHER_ARG, + + PREV_FIRST_MEMBER = PREV_PID, + PREV_LAST_MEMBER = PREV_ARG, + }; + + enum Operator { + OP_INVALID, + // Boolean operators: + OP_EQ, + OP_NE, + OP_LT, + OP_LE, + OP_GT, + OP_GE, + OP_AND, + OP_OR, + OP_NOT, + // Arithmetic operators: + OP_ADD, + OP_SUB, + OP_MUL, + OP_DIV, + OP_MOD, + OP_NEGATE + }; + + enum QueryType { + QUERY_BOOLEAN_OPERATOR, + QUERY_ARITHMETIC_OPERATOR, + QUERY_EVENT_MEMBER, + QUERY_NUMBER, + QUERY_STRING + }; + + // Compare with the given member. + explicit Query(TraceEventMember member); + + // Compare with the given member argument value. + Query(TraceEventMember member, const std::string& arg_name); + + // Compare with the given string. + explicit Query(const std::string& str); + + // Compare with the given number. + explicit Query(double num); + + // Construct a boolean Query that returns (left <binary_op> right). + Query(const Query& left, const Query& right, Operator binary_op); + + // Construct a boolean Query that returns (<binary_op> left). + Query(const Query& left, Operator unary_op); + + // Try to compare left_ against right_ based on operator_. + // If either left or right does not convert to double, false is returned. + // Otherwise, true is returned and |result| is set to the comparison result. + bool CompareAsDouble(const TraceEvent& event, bool* result) const; + + // Try to compare left_ against right_ based on operator_. 
+ // If either left or right does not convert to string, false is returned. + // Otherwise, true is returned and |result| is set to the comparison result. + bool CompareAsString(const TraceEvent& event, bool* result) const; + + // Attempt to convert this Query to a double. On success, true is returned + // and the double value is stored in |num|. + bool GetAsDouble(const TraceEvent& event, double* num) const; + + // Attempt to convert this Query to a string. On success, true is returned + // and the string value is stored in |str|. + bool GetAsString(const TraceEvent& event, std::string* str) const; + + // Evaluate this Query as an arithmetic operator on left_ and right_. + bool EvaluateArithmeticOperator(const TraceEvent& event, + double* num) const; + + // For QUERY_EVENT_MEMBER Query: attempt to get the double value of the Query. + bool GetMemberValueAsDouble(const TraceEvent& event, double* num) const; + + // For QUERY_EVENT_MEMBER Query: attempt to get the string value of the Query. + bool GetMemberValueAsString(const TraceEvent& event, std::string* num) const; + + // Does this Query represent a value? + bool is_value() const { return type_ != QUERY_BOOLEAN_OPERATOR; } + + bool is_unary_operator() const { + return operator_ == OP_NOT || operator_ == OP_NEGATE; + } + + bool is_comparison_operator() const { + return operator_ != OP_INVALID && operator_ < OP_AND; + } + + static const TraceEvent* SelectTargetEvent(const TraceEvent* ev, + TraceEventMember member); + + const Query& left() const; + const Query& right() const; + + private: + QueryType type_; + Operator operator_; + scoped_refptr<QueryNode> left_; + scoped_refptr<QueryNode> right_; + TraceEventMember member_; + double number_; + std::string string_; + bool is_pattern_; +}; + +// Implementation detail: +// QueryNode allows Query to store a ref-counted query tree. +class QueryNode : public base::RefCounted<QueryNode> { + public: + explicit QueryNode(const Query& query); + const Query& query() const { return query_; } + + private: + friend class base::RefCounted<QueryNode>; + ~QueryNode(); + + Query query_; +}; + +// TraceAnalyzer helps tests search for trace events. +class TraceAnalyzer { + public: + ~TraceAnalyzer(); + + // Use trace events from JSON string generated by tracing API. + // Returns non-NULL if the JSON is successfully parsed. + static TraceAnalyzer* Create(const std::string& json_events) + WARN_UNUSED_RESULT; + + void SetIgnoreMetadataEvents(bool ignore) { + ignore_metadata_events_ = ignore; + } + + // Associate BEGIN and END events with each other. This allows Query(OTHER_*) + // to access the associated event and enables Query(EVENT_DURATION). + // An end event will match the most recent begin event with the same name, + // category, process ID and thread ID. This matches what is shown in + // about:tracing. After association, the BEGIN event will point to the + // matching END event, but the END event will not point to the BEGIN event. + void AssociateBeginEndEvents(); + + // Associate ASYNC_BEGIN, ASYNC_STEP and ASYNC_END events with each other. + // An ASYNC_END event will match the most recent ASYNC_BEGIN or ASYNC_STEP + // event with the same name, category, and ID. This creates a singly linked + // list of ASYNC_BEGIN->ASYNC_STEP...->ASYNC_END. + // |match_pid| - If true, will only match async events which are running + // under the same process ID, otherwise will allow linking + // async events from different processes. 
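+  // A typical use, mirroring the unit tests (a sketch; |found| is a
+  // TraceEventVector):
+  //   analyzer->AssociateAsyncBeginEndEvents();
+  //   analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &found);
+  // This collects the ASYNC_BEGIN events that have an associated next event.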
+ void AssociateAsyncBeginEndEvents(bool match_pid = true); + + // AssociateEvents can be used to customize event associations by setting the + // other_event member of TraceEvent. This should be used to associate two + // INSTANT events. + // + // The assumptions are: + // - |first| events occur before |second| events. + // - the closest matching |second| event is the correct match. + // + // |first| - Eligible |first| events match this query. + // |second| - Eligible |second| events match this query. + // |match| - This query is run on the |first| event. The OTHER_* EventMember + // queries will point to an eligible |second| event. The query + // should evaluate to true if the |first|/|second| pair is a match. + // + // When a match is found, the pair will be associated by having the first + // event's other_event member point to the other. AssociateEvents does not + // clear previous associations, so it is possible to associate multiple pairs + // of events by calling AssociateEvents more than once with different queries. + // + // NOTE: AssociateEvents will overwrite existing other_event associations if + // the queries pass for events that already had a previous association. + // + // After calling any Find* method, it is not allowed to call AssociateEvents + // again. + void AssociateEvents(const Query& first, + const Query& second, + const Query& match); + + // For each event, copy its arguments to the other_event argument map. If + // argument name already exists, it will not be overwritten. + void MergeAssociatedEventArgs(); + + // Find all events that match query and replace output vector. + size_t FindEvents(const Query& query, TraceEventVector* output); + + // Find first event that matches query or NULL if not found. + const TraceEvent* FindFirstOf(const Query& query); + + // Find last event that matches query or NULL if not found. + const TraceEvent* FindLastOf(const Query& query); + + const std::string& GetThreadName(const TraceEvent::ProcessThreadID& thread); + + private: + TraceAnalyzer(); + + bool SetEvents(const std::string& json_events) WARN_UNUSED_RESULT; + + // Read metadata (thread names, etc) from events. + void ParseMetadata(); + + std::map<TraceEvent::ProcessThreadID, std::string> thread_names_; + std::vector<TraceEvent> raw_events_; + bool ignore_metadata_events_; + bool allow_association_changes_; + + DISALLOW_COPY_AND_ASSIGN(TraceAnalyzer); +}; + +// Utility functions for collecting process-local traces and creating a +// |TraceAnalyzer| from the result. Please see comments in trace_config.h to +// understand how the |category_filter_string| works. Use "*" to enable all +// default categories. +void Start(const std::string& category_filter_string); +std::unique_ptr<TraceAnalyzer> Stop(); + +// Utility functions for TraceEventVector. + +struct RateStats { + double min_us; + double max_us; + double mean_us; + double standard_deviation_us; +}; + +struct RateStatsOptions { + RateStatsOptions() : trim_min(0u), trim_max(0u) {} + // After the times between events are sorted, the number of specified elements + // will be trimmed before calculating the RateStats. This is useful in cases + // where extreme outliers are tolerable and should not skew the overall + // average. + size_t trim_min; // Trim this many minimum times. + size_t trim_max; // Trim this many maximum times. +}; + +// Calculate min/max/mean and standard deviation from the times between +// adjacent events. 
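+// For example (a sketch based on the unit tests; at least three events are
+// needed for a meaningful result):
+//   RateStats stats;
+//   RateStatsOptions options;
+//   options.trim_max = 1;  // ignore the single largest gap between events
+//   if (GetRateStats(events, &stats, &options))
+//     EXPECT_LT(stats.mean_us, 1000000.0 / 60.0);  // e.g. under 1/60 second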
+bool GetRateStats(const TraceEventVector& events, + RateStats* stats, + const RateStatsOptions* options); + +// Starting from |position|, find the first event that matches |query|. +// Returns true if found, false otherwise. +bool FindFirstOf(const TraceEventVector& events, + const Query& query, + size_t position, + size_t* return_index); + +// Starting from |position|, find the last event that matches |query|. +// Returns true if found, false otherwise. +bool FindLastOf(const TraceEventVector& events, + const Query& query, + size_t position, + size_t* return_index); + +// Find the closest events to |position| in time that match |query|. +// return_second_closest may be NULL. Closeness is determined by comparing +// with the event timestamp. +// Returns true if found, false otherwise. If both return parameters are +// requested, both must be found for a successful result. +bool FindClosest(const TraceEventVector& events, + const Query& query, + size_t position, + size_t* return_closest, + size_t* return_second_closest); + +// Count matches, inclusive of |begin_position|, exclusive of |end_position|. +size_t CountMatches(const TraceEventVector& events, + const Query& query, + size_t begin_position, + size_t end_position); + +// Count all matches. +static inline size_t CountMatches(const TraceEventVector& events, + const Query& query) { + return CountMatches(events, query, 0u, events.size()); +} + +} // namespace trace_analyzer + +#endif // BASE_TEST_TRACE_EVENT_ANALYZER_H_ diff --git a/chromium/base/test/trace_event_analyzer_unittest.cc b/chromium/base/test/trace_event_analyzer_unittest.cc new file mode 100644 index 00000000000..259fd95264b --- /dev/null +++ b/chromium/base/test/trace_event_analyzer_unittest.cc @@ -0,0 +1,959 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/test/trace_event_analyzer.h" + +#include <stddef.h> +#include <stdint.h> + +#include "base/bind.h" +#include "base/memory/ref_counted_memory.h" +#include "base/synchronization/waitable_event.h" +#include "base/threading/platform_thread.h" +#include "base/trace_event/trace_buffer.h" +#include "base/trace_event/traced_value.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace trace_analyzer { + +namespace { + +class TraceEventAnalyzerTest : public testing::Test { + public: + void ManualSetUp(); + void OnTraceDataCollected( + base::WaitableEvent* flush_complete_event, + const scoped_refptr<base::RefCountedString>& json_events_str, + bool has_more_events); + void BeginTracing(); + void EndTracing(); + + base::trace_event::TraceResultBuffer::SimpleOutput output_; + base::trace_event::TraceResultBuffer buffer_; +}; + +void TraceEventAnalyzerTest::ManualSetUp() { + ASSERT_TRUE(base::trace_event::TraceLog::GetInstance()); + buffer_.SetOutputCallback(output_.GetCallback()); + output_.json_output.clear(); +} + +void TraceEventAnalyzerTest::OnTraceDataCollected( + base::WaitableEvent* flush_complete_event, + const scoped_refptr<base::RefCountedString>& json_events_str, + bool has_more_events) { + buffer_.AddFragment(json_events_str->data()); + if (!has_more_events) + flush_complete_event->Signal(); +} + +void TraceEventAnalyzerTest::BeginTracing() { + output_.json_output.clear(); + buffer_.Start(); + base::trace_event::TraceLog::GetInstance()->SetEnabled( + base::trace_event::TraceConfig("*", ""), + base::trace_event::TraceLog::RECORDING_MODE); +} + +void TraceEventAnalyzerTest::EndTracing() { + base::trace_event::TraceLog::GetInstance()->SetDisabled(); + base::WaitableEvent flush_complete_event( + base::WaitableEvent::ResetPolicy::AUTOMATIC, + base::WaitableEvent::InitialState::NOT_SIGNALED); + base::trace_event::TraceLog::GetInstance()->Flush(base::BindRepeating( + &TraceEventAnalyzerTest::OnTraceDataCollected, base::Unretained(this), + base::Unretained(&flush_complete_event))); + flush_complete_event.Wait(); + buffer_.Finish(); +} + +} // namespace + +TEST_F(TraceEventAnalyzerTest, NoEvents) { + ManualSetUp(); + + // Create an empty JSON event string: + buffer_.Start(); + buffer_.Finish(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + + // Search for all events and verify that nothing is returned. 
+ TraceEventVector found; + analyzer->FindEvents(Query::Bool(true), &found); + EXPECT_EQ(0u, found.size()); +} + +TEST_F(TraceEventAnalyzerTest, TraceEvent) { + ManualSetUp(); + + int int_num = 2; + double double_num = 3.5; + const char str[] = "the string"; + + TraceEvent event; + event.arg_numbers["false"] = 0.0; + event.arg_numbers["true"] = 1.0; + event.arg_numbers["int"] = static_cast<double>(int_num); + event.arg_numbers["double"] = double_num; + event.arg_strings["string"] = str; + event.arg_values["dict"] = std::make_unique<base::DictionaryValue>(); + + ASSERT_TRUE(event.HasNumberArg("false")); + ASSERT_TRUE(event.HasNumberArg("true")); + ASSERT_TRUE(event.HasNumberArg("int")); + ASSERT_TRUE(event.HasNumberArg("double")); + ASSERT_TRUE(event.HasStringArg("string")); + ASSERT_FALSE(event.HasNumberArg("notfound")); + ASSERT_FALSE(event.HasStringArg("notfound")); + ASSERT_TRUE(event.HasArg("dict")); + ASSERT_FALSE(event.HasArg("notfound")); + + EXPECT_FALSE(event.GetKnownArgAsBool("false")); + EXPECT_TRUE(event.GetKnownArgAsBool("true")); + EXPECT_EQ(int_num, event.GetKnownArgAsInt("int")); + EXPECT_EQ(double_num, event.GetKnownArgAsDouble("double")); + EXPECT_STREQ(str, event.GetKnownArgAsString("string").c_str()); + + std::unique_ptr<base::Value> arg; + EXPECT_TRUE(event.GetArgAsValue("dict", &arg)); + EXPECT_EQ(base::Value::Type::DICTIONARY, arg->type()); +} + +TEST_F(TraceEventAnalyzerTest, QueryEventMember) { + ManualSetUp(); + + TraceEvent event; + event.thread.process_id = 3; + event.thread.thread_id = 4; + event.timestamp = 1.5; + event.phase = TRACE_EVENT_PHASE_BEGIN; + event.category = "category"; + event.name = "name"; + event.id = "1"; + event.arg_numbers["num"] = 7.0; + event.arg_strings["str"] = "the string"; + + // Other event with all different members: + TraceEvent other; + other.thread.process_id = 5; + other.thread.thread_id = 6; + other.timestamp = 2.5; + other.phase = TRACE_EVENT_PHASE_END; + other.category = "category2"; + other.name = "name2"; + other.id = "2"; + other.arg_numbers["num2"] = 8.0; + other.arg_strings["str2"] = "the string 2"; + + event.other_event = &other; + ASSERT_TRUE(event.has_other_event()); + double duration = event.GetAbsTimeToOtherEvent(); + + Query event_pid = Query::EventPidIs(event.thread.process_id); + Query event_tid = Query::EventTidIs(event.thread.thread_id); + Query event_time = Query::EventTimeIs(event.timestamp); + Query event_duration = Query::EventDurationIs(duration); + Query event_phase = Query::EventPhaseIs(event.phase); + Query event_category = Query::EventCategoryIs(event.category); + Query event_name = Query::EventNameIs(event.name); + Query event_id = Query::EventIdIs(event.id); + Query event_has_arg1 = Query::EventHasNumberArg("num"); + Query event_has_arg2 = Query::EventHasStringArg("str"); + Query event_arg1 = + (Query::EventArg("num") == Query::Double(event.arg_numbers["num"])); + Query event_arg2 = + (Query::EventArg("str") == Query::String(event.arg_strings["str"])); + Query event_has_other = Query::EventHasOther(); + Query other_pid = Query::OtherPidIs(other.thread.process_id); + Query other_tid = Query::OtherTidIs(other.thread.thread_id); + Query other_time = Query::OtherTimeIs(other.timestamp); + Query other_phase = Query::OtherPhaseIs(other.phase); + Query other_category = Query::OtherCategoryIs(other.category); + Query other_name = Query::OtherNameIs(other.name); + Query other_id = Query::OtherIdIs(other.id); + Query other_has_arg1 = Query::OtherHasNumberArg("num2"); + Query other_has_arg2 = 
Query::OtherHasStringArg("str2"); + Query other_arg1 = + (Query::OtherArg("num2") == Query::Double(other.arg_numbers["num2"])); + Query other_arg2 = + (Query::OtherArg("str2") == Query::String(other.arg_strings["str2"])); + + EXPECT_TRUE(event_pid.Evaluate(event)); + EXPECT_TRUE(event_tid.Evaluate(event)); + EXPECT_TRUE(event_time.Evaluate(event)); + EXPECT_TRUE(event_duration.Evaluate(event)); + EXPECT_TRUE(event_phase.Evaluate(event)); + EXPECT_TRUE(event_category.Evaluate(event)); + EXPECT_TRUE(event_name.Evaluate(event)); + EXPECT_TRUE(event_id.Evaluate(event)); + EXPECT_TRUE(event_has_arg1.Evaluate(event)); + EXPECT_TRUE(event_has_arg2.Evaluate(event)); + EXPECT_TRUE(event_arg1.Evaluate(event)); + EXPECT_TRUE(event_arg2.Evaluate(event)); + EXPECT_TRUE(event_has_other.Evaluate(event)); + EXPECT_TRUE(other_pid.Evaluate(event)); + EXPECT_TRUE(other_tid.Evaluate(event)); + EXPECT_TRUE(other_time.Evaluate(event)); + EXPECT_TRUE(other_phase.Evaluate(event)); + EXPECT_TRUE(other_category.Evaluate(event)); + EXPECT_TRUE(other_name.Evaluate(event)); + EXPECT_TRUE(other_id.Evaluate(event)); + EXPECT_TRUE(other_has_arg1.Evaluate(event)); + EXPECT_TRUE(other_has_arg2.Evaluate(event)); + EXPECT_TRUE(other_arg1.Evaluate(event)); + EXPECT_TRUE(other_arg2.Evaluate(event)); + + // Evaluate event queries against other to verify the queries fail when the + // event members are wrong. + EXPECT_FALSE(event_pid.Evaluate(other)); + EXPECT_FALSE(event_tid.Evaluate(other)); + EXPECT_FALSE(event_time.Evaluate(other)); + EXPECT_FALSE(event_duration.Evaluate(other)); + EXPECT_FALSE(event_phase.Evaluate(other)); + EXPECT_FALSE(event_category.Evaluate(other)); + EXPECT_FALSE(event_name.Evaluate(other)); + EXPECT_FALSE(event_id.Evaluate(other)); + EXPECT_FALSE(event_has_arg1.Evaluate(other)); + EXPECT_FALSE(event_has_arg2.Evaluate(other)); + EXPECT_FALSE(event_arg1.Evaluate(other)); + EXPECT_FALSE(event_arg2.Evaluate(other)); + EXPECT_FALSE(event_has_other.Evaluate(other)); +} + +TEST_F(TraceEventAnalyzerTest, BooleanOperators) { + ManualSetUp(); + + BeginTracing(); + { + TRACE_EVENT_INSTANT1("cat1", "name1", TRACE_EVENT_SCOPE_THREAD, "num", 1); + TRACE_EVENT_INSTANT1("cat1", "name2", TRACE_EVENT_SCOPE_THREAD, "num", 2); + TRACE_EVENT_INSTANT1("cat2", "name3", TRACE_EVENT_SCOPE_THREAD, "num", 3); + TRACE_EVENT_INSTANT1("cat2", "name4", TRACE_EVENT_SCOPE_THREAD, "num", 4); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer); + analyzer->SetIgnoreMetadataEvents(true); + + TraceEventVector found; + + // == + + analyzer->FindEvents(Query::EventCategory() == Query::String("cat1"), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name2", found[1]->name.c_str()); + + analyzer->FindEvents(Query::EventArg("num") == Query::Int(2), &found); + ASSERT_EQ(1u, found.size()); + EXPECT_STREQ("name2", found[0]->name.c_str()); + + // != + + analyzer->FindEvents(Query::EventCategory() != Query::String("cat1"), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name3", found[0]->name.c_str()); + EXPECT_STREQ("name4", found[1]->name.c_str()); + + analyzer->FindEvents(Query::EventArg("num") != Query::Int(2), &found); + ASSERT_EQ(3u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name3", found[1]->name.c_str()); + EXPECT_STREQ("name4", found[2]->name.c_str()); + + // < + analyzer->FindEvents(Query::EventArg("num") < Query::Int(2), &found); + ASSERT_EQ(1u, 
found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + + // <= + analyzer->FindEvents(Query::EventArg("num") <= Query::Int(2), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name2", found[1]->name.c_str()); + + // > + analyzer->FindEvents(Query::EventArg("num") > Query::Int(3), &found); + ASSERT_EQ(1u, found.size()); + EXPECT_STREQ("name4", found[0]->name.c_str()); + + // >= + analyzer->FindEvents(Query::EventArg("num") >= Query::Int(4), &found); + ASSERT_EQ(1u, found.size()); + EXPECT_STREQ("name4", found[0]->name.c_str()); + + // && + analyzer->FindEvents(Query::EventName() != Query::String("name1") && + Query::EventArg("num") < Query::Int(3), &found); + ASSERT_EQ(1u, found.size()); + EXPECT_STREQ("name2", found[0]->name.c_str()); + + // || + analyzer->FindEvents(Query::EventName() == Query::String("name1") || + Query::EventArg("num") == Query::Int(3), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name3", found[1]->name.c_str()); + + // ! + analyzer->FindEvents(!(Query::EventName() == Query::String("name1") || + Query::EventArg("num") == Query::Int(3)), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name2", found[0]->name.c_str()); + EXPECT_STREQ("name4", found[1]->name.c_str()); +} + +TEST_F(TraceEventAnalyzerTest, ArithmeticOperators) { + ManualSetUp(); + + BeginTracing(); + { + // These events are searched for: + TRACE_EVENT_INSTANT2("cat1", "math1", TRACE_EVENT_SCOPE_THREAD, + "a", 10, "b", 5); + TRACE_EVENT_INSTANT2("cat1", "math2", TRACE_EVENT_SCOPE_THREAD, + "a", 10, "b", 10); + // Extra events that never match, for noise: + TRACE_EVENT_INSTANT2("noise", "math3", TRACE_EVENT_SCOPE_THREAD, + "a", 1, "b", 3); + TRACE_EVENT_INSTANT2("noise", "math4", TRACE_EVENT_SCOPE_THREAD, + "c", 10, "d", 5); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + + TraceEventVector found; + + // Verify that arithmetic operators function: + + // + + analyzer->FindEvents(Query::EventArg("a") + Query::EventArg("b") == + Query::Int(20), &found); + EXPECT_EQ(1u, found.size()); + EXPECT_STREQ("math2", found.front()->name.c_str()); + + // - + analyzer->FindEvents(Query::EventArg("a") - Query::EventArg("b") == + Query::Int(5), &found); + EXPECT_EQ(1u, found.size()); + EXPECT_STREQ("math1", found.front()->name.c_str()); + + // * + analyzer->FindEvents(Query::EventArg("a") * Query::EventArg("b") == + Query::Int(50), &found); + EXPECT_EQ(1u, found.size()); + EXPECT_STREQ("math1", found.front()->name.c_str()); + + // / + analyzer->FindEvents(Query::EventArg("a") / Query::EventArg("b") == + Query::Int(2), &found); + EXPECT_EQ(1u, found.size()); + EXPECT_STREQ("math1", found.front()->name.c_str()); + + // % + analyzer->FindEvents(Query::EventArg("a") % Query::EventArg("b") == + Query::Int(0), &found); + EXPECT_EQ(2u, found.size()); + + // - (negate) + analyzer->FindEvents(-Query::EventArg("b") == Query::Int(-10), &found); + EXPECT_EQ(1u, found.size()); + EXPECT_STREQ("math2", found.front()->name.c_str()); +} + +TEST_F(TraceEventAnalyzerTest, StringPattern) { + ManualSetUp(); + + BeginTracing(); + { + TRACE_EVENT_INSTANT0("cat1", "name1", TRACE_EVENT_SCOPE_THREAD); + TRACE_EVENT_INSTANT0("cat1", "name2", TRACE_EVENT_SCOPE_THREAD); + TRACE_EVENT_INSTANT0("cat1", "no match", TRACE_EVENT_SCOPE_THREAD); + TRACE_EVENT_INSTANT0("cat1", "name3x", TRACE_EVENT_SCOPE_THREAD); + } + EndTracing(); + + 
std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->SetIgnoreMetadataEvents(true); + + TraceEventVector found; + + analyzer->FindEvents(Query::EventName() == Query::Pattern("name?"), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name2", found[1]->name.c_str()); + + analyzer->FindEvents(Query::EventName() == Query::Pattern("name*"), &found); + ASSERT_EQ(3u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name2", found[1]->name.c_str()); + EXPECT_STREQ("name3x", found[2]->name.c_str()); + + analyzer->FindEvents(Query::EventName() != Query::Pattern("name*"), &found); + ASSERT_EQ(1u, found.size()); + EXPECT_STREQ("no match", found[0]->name.c_str()); +} + +// Test that duration queries work. +TEST_F(TraceEventAnalyzerTest, BeginEndDuration) { + ManualSetUp(); + + const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(200); + // We will search for events that have a duration of greater than 90% of the + // sleep time, so that there is no flakiness. + int64_t duration_cutoff_us = (kSleepTime.InMicroseconds() * 9) / 10; + + BeginTracing(); + { + TRACE_EVENT_BEGIN0("cat1", "name1"); // found by duration query + TRACE_EVENT_BEGIN0("noise", "name2"); // not searched for, just noise + { + TRACE_EVENT_BEGIN0("cat2", "name3"); // found by duration query + // next event not searched for, just noise + TRACE_EVENT_INSTANT0("noise", "name4", TRACE_EVENT_SCOPE_THREAD); + base::PlatformThread::Sleep(kSleepTime); + TRACE_EVENT_BEGIN0("cat2", "name5"); // not found (duration too short) + TRACE_EVENT_END0("cat2", "name5"); // not found (duration too short) + TRACE_EVENT_END0("cat2", "name3"); // found by duration query + } + TRACE_EVENT_END0("noise", "name2"); // not searched for, just noise + TRACE_EVENT_END0("cat1", "name1"); // found by duration query + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->AssociateBeginEndEvents(); + + TraceEventVector found; + analyzer->FindEvents( + Query::MatchBeginWithEnd() && + Query::EventDuration() > + Query::Int(static_cast<int>(duration_cutoff_us)) && + (Query::EventCategory() == Query::String("cat1") || + Query::EventCategory() == Query::String("cat2") || + Query::EventCategory() == Query::String("cat3")), + &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name3", found[1]->name.c_str()); +} + +// Test that duration queries work. +TEST_F(TraceEventAnalyzerTest, CompleteDuration) { + ManualSetUp(); + + const base::TimeDelta kSleepTime = base::TimeDelta::FromMilliseconds(200); + // We will search for events that have a duration of greater than 90% of the + // sleep time, so that there is no flakiness. 
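+  // (With kSleepTime = 200 ms, the cutoff below works out to
+  // 0.9 * 200,000 us = 180,000 us.)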
+ int64_t duration_cutoff_us = (kSleepTime.InMicroseconds() * 9) / 10; + + BeginTracing(); + { + TRACE_EVENT0("cat1", "name1"); // found by duration query + TRACE_EVENT0("noise", "name2"); // not searched for, just noise + { + TRACE_EVENT0("cat2", "name3"); // found by duration query + // next event not searched for, just noise + TRACE_EVENT_INSTANT0("noise", "name4", TRACE_EVENT_SCOPE_THREAD); + base::PlatformThread::Sleep(kSleepTime); + TRACE_EVENT0("cat2", "name5"); // not found (duration too short) + } + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->AssociateBeginEndEvents(); + + TraceEventVector found; + analyzer->FindEvents( + Query::EventCompleteDuration() > + Query::Int(static_cast<int>(duration_cutoff_us)) && + (Query::EventCategory() == Query::String("cat1") || + Query::EventCategory() == Query::String("cat2") || + Query::EventCategory() == Query::String("cat3")), + &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STREQ("name1", found[0]->name.c_str()); + EXPECT_STREQ("name3", found[1]->name.c_str()); +} + +// Test AssociateBeginEndEvents +TEST_F(TraceEventAnalyzerTest, BeginEndAssocations) { + ManualSetUp(); + + BeginTracing(); + { + TRACE_EVENT_END0("cat1", "name1"); // does not match out of order begin + TRACE_EVENT_BEGIN0("cat1", "name2"); + TRACE_EVENT_INSTANT0("cat1", "name3", TRACE_EVENT_SCOPE_THREAD); + TRACE_EVENT_BEGIN0("cat1", "name1"); + TRACE_EVENT_END0("cat1", "name2"); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->AssociateBeginEndEvents(); + + TraceEventVector found; + analyzer->FindEvents(Query::MatchBeginWithEnd(), &found); + ASSERT_EQ(1u, found.size()); + EXPECT_STREQ("name2", found[0]->name.c_str()); +} + +// Test MergeAssociatedEventArgs +TEST_F(TraceEventAnalyzerTest, MergeAssociatedEventArgs) { + ManualSetUp(); + + const char arg_string[] = "arg_string"; + BeginTracing(); + { + TRACE_EVENT_BEGIN0("cat1", "name1"); + TRACE_EVENT_END1("cat1", "name1", "arg", arg_string); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->AssociateBeginEndEvents(); + + TraceEventVector found; + analyzer->FindEvents(Query::MatchBeginName("name1"), &found); + ASSERT_EQ(1u, found.size()); + std::string arg_actual; + EXPECT_FALSE(found[0]->GetArgAsString("arg", &arg_actual)); + + analyzer->MergeAssociatedEventArgs(); + EXPECT_TRUE(found[0]->GetArgAsString("arg", &arg_actual)); + EXPECT_STREQ(arg_string, arg_actual.c_str()); +} + +// Test AssociateAsyncBeginEndEvents +TEST_F(TraceEventAnalyzerTest, AsyncBeginEndAssocations) { + ManualSetUp(); + + BeginTracing(); + { + TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xA); // no match / out of order + TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xB); + TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xC); + TRACE_EVENT_INSTANT0("cat1", "name1", TRACE_EVENT_SCOPE_THREAD); // noise + TRACE_EVENT0("cat1", "name1"); // noise + TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xB); + TRACE_EVENT_ASYNC_END0("cat1", "name1", 0xC); + TRACE_EVENT_ASYNC_BEGIN0("cat1", "name1", 0xA); // no match / out of order + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->AssociateAsyncBeginEndEvents(); + + TraceEventVector found; + 
analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &found); + ASSERT_EQ(2u, found.size()); + EXPECT_STRCASEEQ("0xb", found[0]->id.c_str()); + EXPECT_STRCASEEQ("0xc", found[1]->id.c_str()); +} + +// Test AssociateAsyncBeginEndEvents +TEST_F(TraceEventAnalyzerTest, AsyncBeginEndAssocationsWithSteps) { + ManualSetUp(); + + BeginTracing(); + { + TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xA, "s1"); + TRACE_EVENT_ASYNC_END0("c", "n", 0xA); + TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xB); + TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xC); + TRACE_EVENT_ASYNC_STEP_PAST0("c", "n", 0xB, "s1"); + TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xC, "s1"); + TRACE_EVENT_ASYNC_STEP_INTO1("c", "n", 0xC, "s2", "a", 1); + TRACE_EVENT_ASYNC_END0("c", "n", 0xB); + TRACE_EVENT_ASYNC_END0("c", "n", 0xC); + TRACE_EVENT_ASYNC_BEGIN0("c", "n", 0xA); + TRACE_EVENT_ASYNC_STEP_INTO0("c", "n", 0xA, "s2"); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + analyzer->AssociateAsyncBeginEndEvents(); + + TraceEventVector found; + analyzer->FindEvents(Query::MatchAsyncBeginWithNext(), &found); + ASSERT_EQ(3u, found.size()); + + EXPECT_STRCASEEQ("0xb", found[0]->id.c_str()); + EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, found[0]->other_event->phase); + EXPECT_EQ(found[0], found[0]->other_event->prev_event); + EXPECT_TRUE(found[0]->other_event->other_event); + EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_END, + found[0]->other_event->other_event->phase); + EXPECT_EQ(found[0]->other_event, + found[0]->other_event->other_event->prev_event); + + EXPECT_STRCASEEQ("0xc", found[1]->id.c_str()); + EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, found[1]->other_event->phase); + EXPECT_EQ(found[1], found[1]->other_event->prev_event); + EXPECT_TRUE(found[1]->other_event->other_event); + EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, + found[1]->other_event->other_event->phase); + EXPECT_EQ(found[1]->other_event, + found[1]->other_event->other_event->prev_event); + double arg_actual = 0; + EXPECT_TRUE(found[1]->other_event->other_event->GetArgAsNumber( + "a", &arg_actual)); + EXPECT_EQ(1.0, arg_actual); + EXPECT_TRUE(found[1]->other_event->other_event->other_event); + EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_END, + found[1]->other_event->other_event->other_event->phase); + + EXPECT_STRCASEEQ("0xa", found[2]->id.c_str()); + EXPECT_EQ(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, found[2]->other_event->phase); +} + +// Test that the TraceAnalyzer custom associations work. +TEST_F(TraceEventAnalyzerTest, CustomAssociations) { + ManualSetUp(); + + // Add events that begin/end in pipelined ordering with unique ID parameter + // to match up the begin/end pairs. + BeginTracing(); + { + // no begin match + TRACE_EVENT_INSTANT1("cat1", "end", TRACE_EVENT_SCOPE_THREAD, "id", 1); + // end is cat4 + TRACE_EVENT_INSTANT1("cat2", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 2); + // end is cat5 + TRACE_EVENT_INSTANT1("cat3", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 3); + TRACE_EVENT_INSTANT1("cat4", "end", TRACE_EVENT_SCOPE_THREAD, "id", 2); + TRACE_EVENT_INSTANT1("cat5", "end", TRACE_EVENT_SCOPE_THREAD, "id", 3); + // no end match + TRACE_EVENT_INSTANT1("cat6", "begin", TRACE_EVENT_SCOPE_THREAD, "id", 1); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + + // begin, end, and match queries to find proper begin/end pairs. 
+ Query begin(Query::EventName() == Query::String("begin")); + Query end(Query::EventName() == Query::String("end")); + Query match(Query::EventArg("id") == Query::OtherArg("id")); + analyzer->AssociateEvents(begin, end, match); + + TraceEventVector found; + + // cat1 has no other_event. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat1") && + Query::EventHasOther(), &found); + EXPECT_EQ(0u, found.size()); + + // cat1 has no other_event. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat1") && + !Query::EventHasOther(), &found); + EXPECT_EQ(1u, found.size()); + + // cat6 has no other_event. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat6") && + !Query::EventHasOther(), &found); + EXPECT_EQ(1u, found.size()); + + // cat2 and cat4 are associated. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat2") && + Query::OtherCategory() == Query::String("cat4"), &found); + EXPECT_EQ(1u, found.size()); + + // cat4 and cat2 are not associated. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat4") && + Query::OtherCategory() == Query::String("cat2"), &found); + EXPECT_EQ(0u, found.size()); + + // cat3 and cat5 are associated. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat3") && + Query::OtherCategory() == Query::String("cat5"), &found); + EXPECT_EQ(1u, found.size()); + + // cat5 and cat3 are not associated. + analyzer->FindEvents(Query::EventCategory() == Query::String("cat5") && + Query::OtherCategory() == Query::String("cat3"), &found); + EXPECT_EQ(0u, found.size()); +} + +// Verify that Query literals and types are properly casted. +TEST_F(TraceEventAnalyzerTest, Literals) { + ManualSetUp(); + + // Since these queries don't refer to the event data, the dummy event below + // will never be accessed. + TraceEvent dummy; + char char_num = 5; + short short_num = -5; + EXPECT_TRUE((Query::Double(5.0) == Query::Int(char_num)).Evaluate(dummy)); + EXPECT_TRUE((Query::Double(-5.0) == Query::Int(short_num)).Evaluate(dummy)); + EXPECT_TRUE((Query::Double(1.0) == Query::Uint(1u)).Evaluate(dummy)); + EXPECT_TRUE((Query::Double(1.0) == Query::Int(1)).Evaluate(dummy)); + EXPECT_TRUE((Query::Double(-1.0) == Query::Int(-1)).Evaluate(dummy)); + EXPECT_TRUE((Query::Double(1.0) == Query::Double(1.0f)).Evaluate(dummy)); + EXPECT_TRUE((Query::Bool(true) == Query::Int(1)).Evaluate(dummy)); + EXPECT_TRUE((Query::Bool(false) == Query::Int(0)).Evaluate(dummy)); + EXPECT_TRUE((Query::Bool(true) == Query::Double(1.0f)).Evaluate(dummy)); + EXPECT_TRUE((Query::Bool(false) == Query::Double(0.0f)).Evaluate(dummy)); +} + +// Test GetRateStats. +TEST_F(TraceEventAnalyzerTest, RateStats) { + std::vector<TraceEvent> events; + events.reserve(100); + TraceEventVector event_ptrs; + double timestamp = 0.0; + double little_delta = 1.0; + double big_delta = 10.0; + double tiny_delta = 0.1; + RateStats stats; + RateStatsOptions options; + + // Insert 10 events, each apart by little_delta. + for (int i = 0; i < 10; ++i) { + timestamp += little_delta; + TraceEvent event; + event.timestamp = timestamp; + events.push_back(std::move(event)); + event_ptrs.push_back(&events.back()); + } + + ASSERT_TRUE(GetRateStats(event_ptrs, &stats, nullptr)); + EXPECT_EQ(little_delta, stats.mean_us); + EXPECT_EQ(little_delta, stats.min_us); + EXPECT_EQ(little_delta, stats.max_us); + EXPECT_EQ(0.0, stats.standard_deviation_us); + + // Add an event apart by big_delta. 
+ { + timestamp += big_delta; + TraceEvent event; + event.timestamp = timestamp; + events.push_back(std::move(event)); + event_ptrs.push_back(&events.back()); + } + + ASSERT_TRUE(GetRateStats(event_ptrs, &stats, nullptr)); + EXPECT_LT(little_delta, stats.mean_us); + EXPECT_EQ(little_delta, stats.min_us); + EXPECT_EQ(big_delta, stats.max_us); + EXPECT_LT(0.0, stats.standard_deviation_us); + + // Trim off the biggest delta and verify stats. + options.trim_min = 0; + options.trim_max = 1; + ASSERT_TRUE(GetRateStats(event_ptrs, &stats, &options)); + EXPECT_EQ(little_delta, stats.mean_us); + EXPECT_EQ(little_delta, stats.min_us); + EXPECT_EQ(little_delta, stats.max_us); + EXPECT_EQ(0.0, stats.standard_deviation_us); + + // Add an event apart by tiny_delta. + { + timestamp += tiny_delta; + TraceEvent event; + event.timestamp = timestamp; + events.push_back(std::move(event)); + event_ptrs.push_back(&events.back()); + } + + // Trim off both the biggest and tiniest delta and verify stats. + options.trim_min = 1; + options.trim_max = 1; + ASSERT_TRUE(GetRateStats(event_ptrs, &stats, &options)); + EXPECT_EQ(little_delta, stats.mean_us); + EXPECT_EQ(little_delta, stats.min_us); + EXPECT_EQ(little_delta, stats.max_us); + EXPECT_EQ(0.0, stats.standard_deviation_us); + + // Verify smallest allowed number of events. + { + TraceEvent event; + TraceEventVector few_event_ptrs; + few_event_ptrs.push_back(&event); + few_event_ptrs.push_back(&event); + ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, nullptr)); + few_event_ptrs.push_back(&event); + ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, nullptr)); + + // Trim off more than allowed and verify failure. + options.trim_min = 0; + options.trim_max = 1; + ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options)); + } +} + +// Test FindFirstOf and FindLastOf. 
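+// (FindFirstOf scans forward from |position| and FindLastOf scans backward
+// from it; the expectations below rely on that.)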
+TEST_F(TraceEventAnalyzerTest, FindOf) { + size_t num_events = 100; + size_t index = 0; + TraceEventVector event_ptrs; + EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(true), 0, &index)); + EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(true), 10, &index)); + EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(true), 0, &index)); + EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(true), 10, &index)); + + std::vector<TraceEvent> events; + events.resize(num_events); + for (auto& i : events) + event_ptrs.push_back(&i); + size_t bam_index = num_events/2; + events[bam_index].name = "bam"; + Query query_bam = Query::EventName() == Query::String(events[bam_index].name); + + // FindFirstOf + EXPECT_FALSE(FindFirstOf(event_ptrs, Query::Bool(false), 0, &index)); + EXPECT_TRUE(FindFirstOf(event_ptrs, Query::Bool(true), 0, &index)); + EXPECT_EQ(0u, index); + EXPECT_TRUE(FindFirstOf(event_ptrs, Query::Bool(true), 5, &index)); + EXPECT_EQ(5u, index); + + EXPECT_FALSE(FindFirstOf(event_ptrs, query_bam, bam_index + 1, &index)); + EXPECT_TRUE(FindFirstOf(event_ptrs, query_bam, 0, &index)); + EXPECT_EQ(bam_index, index); + EXPECT_TRUE(FindFirstOf(event_ptrs, query_bam, bam_index, &index)); + EXPECT_EQ(bam_index, index); + + // FindLastOf + EXPECT_FALSE(FindLastOf(event_ptrs, Query::Bool(false), 1000, &index)); + EXPECT_TRUE(FindLastOf(event_ptrs, Query::Bool(true), 1000, &index)); + EXPECT_EQ(num_events - 1, index); + EXPECT_TRUE(FindLastOf(event_ptrs, Query::Bool(true), num_events - 5, + &index)); + EXPECT_EQ(num_events - 5, index); + + EXPECT_FALSE(FindLastOf(event_ptrs, query_bam, bam_index - 1, &index)); + EXPECT_TRUE(FindLastOf(event_ptrs, query_bam, num_events, &index)); + EXPECT_EQ(bam_index, index); + EXPECT_TRUE(FindLastOf(event_ptrs, query_bam, bam_index, &index)); + EXPECT_EQ(bam_index, index); +} + +// Test FindClosest. +TEST_F(TraceEventAnalyzerTest, FindClosest) { + size_t index_1 = 0; + size_t index_2 = 0; + TraceEventVector event_ptrs; + EXPECT_FALSE(FindClosest(event_ptrs, Query::Bool(true), 0, + &index_1, &index_2)); + + size_t num_events = 5; + std::vector<TraceEvent> events; + events.resize(num_events); + for (size_t i = 0; i < events.size(); ++i) { + // timestamps go up exponentially so the lower index is always closer in + // time than the higher index. + events[i].timestamp = static_cast<double>(i) * static_cast<double>(i); + event_ptrs.push_back(&events[i]); + } + events[0].name = "one"; + events[2].name = "two"; + events[4].name = "three"; + Query query_named = Query::EventName() != Query::String(std::string()); + Query query_one = Query::EventName() == Query::String("one"); + + // Only one event matches query_one, so two closest can't be found. + EXPECT_FALSE(FindClosest(event_ptrs, query_one, 0, &index_1, &index_2)); + + EXPECT_TRUE(FindClosest(event_ptrs, query_one, 3, &index_1, nullptr)); + EXPECT_EQ(0u, index_1); + + EXPECT_TRUE(FindClosest(event_ptrs, query_named, 1, &index_1, &index_2)); + EXPECT_EQ(0u, index_1); + EXPECT_EQ(2u, index_2); + + EXPECT_TRUE(FindClosest(event_ptrs, query_named, 4, &index_1, &index_2)); + EXPECT_EQ(4u, index_1); + EXPECT_EQ(2u, index_2); + + EXPECT_TRUE(FindClosest(event_ptrs, query_named, 3, &index_1, &index_2)); + EXPECT_EQ(2u, index_1); + EXPECT_EQ(0u, index_2); +} + +// Test CountMatches. 
+TEST_F(TraceEventAnalyzerTest, CountMatches) { + TraceEventVector event_ptrs; + EXPECT_EQ(0u, CountMatches(event_ptrs, Query::Bool(true), 0, 10)); + + size_t num_events = 5; + size_t num_named = 3; + std::vector<TraceEvent> events; + events.resize(num_events); + for (auto& i : events) + event_ptrs.push_back(&i); + events[0].name = "one"; + events[2].name = "two"; + events[4].name = "three"; + Query query_named = Query::EventName() != Query::String(std::string()); + Query query_one = Query::EventName() == Query::String("one"); + + EXPECT_EQ(0u, CountMatches(event_ptrs, Query::Bool(false))); + EXPECT_EQ(num_events, CountMatches(event_ptrs, Query::Bool(true))); + EXPECT_EQ(num_events - 1, CountMatches(event_ptrs, Query::Bool(true), + 1, num_events)); + EXPECT_EQ(1u, CountMatches(event_ptrs, query_one)); + EXPECT_EQ(num_events - 1, CountMatches(event_ptrs, !query_one)); + EXPECT_EQ(num_named, CountMatches(event_ptrs, query_named)); +} + +TEST_F(TraceEventAnalyzerTest, ComplexArgument) { + ManualSetUp(); + + BeginTracing(); + { + std::unique_ptr<base::trace_event::TracedValue> value( + new base::trace_event::TracedValue); + value->SetString("property", "value"); + TRACE_EVENT1("cat", "name", "arg", std::move(value)); + } + EndTracing(); + + std::unique_ptr<TraceAnalyzer> analyzer( + TraceAnalyzer::Create(output_.json_output)); + ASSERT_TRUE(analyzer.get()); + + TraceEventVector events; + analyzer->FindEvents(Query::EventName() == Query::String("name"), &events); + + EXPECT_EQ(1u, events.size()); + EXPECT_EQ("cat", events[0]->category); + EXPECT_EQ("name", events[0]->name); + EXPECT_TRUE(events[0]->HasArg("arg")); + + std::unique_ptr<base::Value> arg; + events[0]->GetArgAsValue("arg", &arg); + base::DictionaryValue* arg_dict; + EXPECT_TRUE(arg->GetAsDictionary(&arg_dict)); + std::string property; + EXPECT_TRUE(arg_dict->GetString("property", &property)); + EXPECT_EQ("value", property); +} + +} // namespace trace_analyzer diff --git a/chromium/base/test/trace_to_file.cc b/chromium/base/test/trace_to_file.cc new file mode 100644 index 00000000000..5ada1d00c2a --- /dev/null +++ b/chromium/base/test/trace_to_file.cc @@ -0,0 +1,112 @@ +// Copyright (c) 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/trace_to_file.h" + +#include "base/base_switches.h" +#include "base/bind.h" +#include "base/command_line.h" +#include "base/files/file_util.h" +#include "base/memory/ref_counted_memory.h" +#include "base/run_loop.h" +#include "base/test/task_environment.h" +#include "base/threading/thread_task_runner_handle.h" +#include "base/trace_event/trace_buffer.h" +#include "base/trace_event/trace_log.h" + +namespace base { +namespace test { + +TraceToFile::TraceToFile() : started_(false) { +} + +TraceToFile::~TraceToFile() { + EndTracingIfNeeded(); +} + +void TraceToFile::BeginTracingFromCommandLineOptions() { + DCHECK(CommandLine::InitializedForCurrentProcess()); + DCHECK(!started_); + + if (!CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToFile)) + return; + + // Empty filter (i.e. 
just --trace-to-file) turns into default categories in + // TraceEventImpl + std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII( + switches::kTraceToFile); + + FilePath path; + if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToFileName)) { + path = FilePath(CommandLine::ForCurrentProcess() + ->GetSwitchValuePath(switches::kTraceToFileName)); + } else { + path = FilePath(FILE_PATH_LITERAL("trace.json")); + } + + BeginTracing(path, filter); +} + +void TraceToFile::BeginTracing(const FilePath& path, + const std::string& categories) { + DCHECK(!started_); + started_ = true; + path_ = path; + WriteFileHeader(); + + trace_event::TraceLog::GetInstance()->SetEnabled( + trace_event::TraceConfig(categories, trace_event::RECORD_UNTIL_FULL), + trace_event::TraceLog::RECORDING_MODE); +} + +void TraceToFile::WriteFileHeader() { + WriteFile(path_, "{\"traceEvents\": ["); +} + +void TraceToFile::AppendFileFooter() { + const char str[] = "]}"; + AppendToFile(path_, str, static_cast<int>(strlen(str))); +} + +void TraceToFile::TraceOutputCallback(const std::string& data) { + bool ret = AppendToFile(path_, data.c_str(), static_cast<int>(data.size())); + DCHECK(ret); +} + +static void OnTraceDataCollected( + OnceClosure quit_closure, + trace_event::TraceResultBuffer* buffer, + const scoped_refptr<RefCountedString>& json_events_str, + bool has_more_events) { + buffer->AddFragment(json_events_str->data()); + if (!has_more_events) + std::move(quit_closure).Run(); +} + +void TraceToFile::EndTracingIfNeeded() { + if (!started_) + return; + started_ = false; + + trace_event::TraceLog::GetInstance()->SetDisabled(); + + trace_event::TraceResultBuffer buffer; + buffer.SetOutputCallback( + BindRepeating(&TraceToFile::TraceOutputCallback, Unretained(this))); + + // In tests we might not have a TaskEnvironment, create one if needed. + std::unique_ptr<SingleThreadTaskEnvironment> task_environment; + if (!ThreadTaskRunnerHandle::IsSet()) + task_environment = std::make_unique<SingleThreadTaskEnvironment>(); + + RunLoop run_loop; + trace_event::TraceLog::GetInstance()->Flush(BindRepeating( + &OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer))); + run_loop.Run(); + + AppendFileFooter(); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/trace_to_file.h b/chromium/base/test/trace_to_file.h new file mode 100644 index 00000000000..43087367c30 --- /dev/null +++ b/chromium/base/test/trace_to_file.h @@ -0,0 +1,35 @@ +// Copyright (c) 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
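+
+// Writes a test run's trace events to a JSON file. Rough usage, based on the
+// implementation above (a sketch of the intended flow): construct a
+// TraceToFile and call BeginTracingFromCommandLineOptions(); if
+// switches::kTraceToFile is present, tracing starts and the collected events
+// are flushed to the path given by switches::kTraceToFileName (defaulting to
+// "trace.json") when EndTracingIfNeeded() runs or the object is destroyed.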
+ +#ifndef BASE_TEST_TRACE_TO_FILE_H_ +#define BASE_TEST_TRACE_TO_FILE_H_ + +#include "base/files/file_path.h" + +namespace base { +namespace test { + +class TraceToFile { + public: + TraceToFile(); + ~TraceToFile(); + + void BeginTracingFromCommandLineOptions(); + void BeginTracing(const base::FilePath& path, const std::string& categories); + void EndTracingIfNeeded(); + + private: + void WriteFileHeader(); + void AppendFileFooter(); + + void TraceOutputCallback(const std::string& data); + + base::FilePath path_; + bool started_; +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_TRACE_TO_FILE_H_ diff --git a/chromium/base/test/values_test_util.cc b/chromium/base/test/values_test_util.cc new file mode 100644 index 00000000000..5016d5698a2 --- /dev/null +++ b/chromium/base/test/values_test_util.cc @@ -0,0 +1,247 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/values_test_util.h" + +#include <ostream> +#include <utility> + +#include "base/json/json_reader.h" +#include "base/json/json_writer.h" +#include "base/memory/ptr_util.h" +#include "base/strings/string_number_conversions.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { + +void ExpectDictBooleanValue(bool expected_value, + const DictionaryValue& value, + const std::string& key) { + bool boolean_value = false; + EXPECT_TRUE(value.GetBoolean(key, &boolean_value)) << key; + EXPECT_EQ(expected_value, boolean_value) << key; +} + +void ExpectDictDictionaryValue(const DictionaryValue& expected_value, + const DictionaryValue& value, + const std::string& key) { + const DictionaryValue* dict_value = nullptr; + EXPECT_TRUE(value.GetDictionary(key, &dict_value)) << key; + EXPECT_EQ(expected_value, *dict_value) << key; +} + +void ExpectDictIntegerValue(int expected_value, + const DictionaryValue& value, + const std::string& key) { + int integer_value = 0; + EXPECT_TRUE(value.GetInteger(key, &integer_value)) << key; + EXPECT_EQ(expected_value, integer_value) << key; +} + +void ExpectDictListValue(const ListValue& expected_value, + const DictionaryValue& value, + const std::string& key) { + const ListValue* list_value = nullptr; + EXPECT_TRUE(value.GetList(key, &list_value)) << key; + EXPECT_EQ(expected_value, *list_value) << key; +} + +void ExpectDictStringValue(const std::string& expected_value, + const DictionaryValue& value, + const std::string& key) { + std::string string_value; + EXPECT_TRUE(value.GetString(key, &string_value)) << key; + EXPECT_EQ(expected_value, string_value) << key; +} + +void ExpectStringValue(const std::string& expected_str, const Value& actual) { + EXPECT_EQ(Value::Type::STRING, actual.type()); + EXPECT_EQ(expected_str, actual.GetString()); +} + +namespace test { + +namespace { + +std::string FormatAsJSON(const base::Value& value) { + std::string json; + JSONWriter::Write(value, &json); + return json; +} + +class DictionaryHasValueMatcher + : public testing::MatcherInterface<const base::Value&> { + public: + DictionaryHasValueMatcher(const std::string& key, + const base::Value& expected_value) + : key_(key), expected_value_(expected_value.Clone()) {} + + ~DictionaryHasValueMatcher() = default; + + bool MatchAndExplain(const base::Value& value, + testing::MatchResultListener* listener) const override { + if (!value.is_dict()) { + *listener << "The value '" << FormatAsJSON(value) + << "' is not a dictionary"; + return false; + } + const 
base::Value* sub_value = value.FindKey(key_); + if (!sub_value) { + *listener << "Dictionary '" << FormatAsJSON(value) + << "' does not have key '" << key_ << "'"; + return false; + } + if (*sub_value != expected_value_) { + *listener << "Dictionary value under key '" << key_ << "' is '" + << FormatAsJSON(*sub_value) << "', expected '" + << FormatAsJSON(expected_value_) << "'"; + return false; + } + return true; + } + + void DescribeTo(std::ostream* os) const override { + *os << "has key '" << key_ << "' with value '" + << FormatAsJSON(expected_value_) << "'"; + } + + void DescribeNegationTo(std::ostream* os) const override { + *os << "does not have key '" << key_ << "' with value '" + << FormatAsJSON(expected_value_) << "'"; + } + + private: + DictionaryHasValueMatcher& operator=(const DictionaryHasValueMatcher& other) = + delete; + + const std::string key_; + const base::Value expected_value_; +}; + +class DictionaryHasValuesMatcher + : public testing::MatcherInterface<const base::Value&> { + public: + DictionaryHasValuesMatcher(const base::Value& template_value) + : template_value_(template_value.Clone()) { + CHECK(template_value.is_dict()); + } + + ~DictionaryHasValuesMatcher() = default; + + bool MatchAndExplain(const base::Value& value, + testing::MatchResultListener* listener) const override { + if (!value.is_dict()) { + *listener << "The value '" << FormatAsJSON(value) + << "' is not a dictionary"; + return false; + } + + bool ok = true; + for (const auto& template_dict_item : template_value_.DictItems()) { + const base::Value* sub_value = value.FindKey(template_dict_item.first); + if (!sub_value) { + *listener << "\nDictionary does not have key '" + << template_dict_item.first << "'"; + ok = false; + continue; + } + if (*sub_value != template_dict_item.second) { + *listener << "\nDictionary value under key '" + << template_dict_item.first << "' is '" + << FormatAsJSON(*sub_value) << "', expected '" + << FormatAsJSON(template_dict_item.second) << "'"; + ok = false; + } + } + return ok; + } + + void DescribeTo(std::ostream* os) const override { + *os << "contains all key-values from '" << FormatAsJSON(template_value_) + << "'"; + } + + void DescribeNegationTo(std::ostream* os) const override { + *os << "does not contain key-values from '" << FormatAsJSON(template_value_) + << "'"; + } + + private: + DictionaryHasValueMatcher& operator=(const DictionaryHasValueMatcher& other) = + delete; + + const base::Value template_value_; +}; + +} // namespace + +testing::Matcher<const base::Value&> DictionaryHasValue( + const std::string& key, + const base::Value& expected_value) { + return testing::MakeMatcher( + new DictionaryHasValueMatcher(key, expected_value)); +} + +testing::Matcher<const base::Value&> DictionaryHasValues( + const base::Value& template_value) { + return testing::MakeMatcher(new DictionaryHasValuesMatcher(template_value)); +} + +IsJsonMatcher::IsJsonMatcher(base::StringPiece json) + : expected_value_(test::ParseJson(json)) {} + +IsJsonMatcher::IsJsonMatcher(const base::Value& value) + : expected_value_(value.Clone()) {} + +IsJsonMatcher::IsJsonMatcher(const IsJsonMatcher& other) + : expected_value_(other.expected_value_.Clone()) {} + +IsJsonMatcher::~IsJsonMatcher() = default; + +bool IsJsonMatcher::MatchAndExplain( + base::StringPiece json, + testing::MatchResultListener* listener) const { + // This is almost the same logic as ParseJson, but the parser uses stricter + // options for JSON data that is assumed to be generated by the code under + // test rather than written 
by hand as part of a unit test. + JSONReader::ValueWithError ret = + JSONReader::ReadAndReturnValueWithError(json, JSON_PARSE_RFC); + if (!ret.value) { + *listener << "Failed to parse \"" << json << "\": " << ret.error_message; + return false; + } + return MatchAndExplain(*ret.value, listener); +} + +bool IsJsonMatcher::MatchAndExplain( + const base::Value& value, + testing::MatchResultListener* /* listener */) const { + return expected_value_ == value; +} + +void IsJsonMatcher::DescribeTo(std::ostream* os) const { + *os << "is the JSON value " << expected_value_; +} + +void IsJsonMatcher::DescribeNegationTo(std::ostream* os) const { + *os << "is not the JSON value " << expected_value_; +} + +Value ParseJson(StringPiece json) { + JSONReader::ValueWithError result = + JSONReader::ReadAndReturnValueWithError(json, JSON_ALLOW_TRAILING_COMMAS); + if (!result.value) { + ADD_FAILURE() << "Failed to parse \"" << json + << "\": " << result.error_message; + return Value(); + } + return std::move(result.value.value()); +} + +std::unique_ptr<Value> ParseJsonDeprecated(StringPiece json) { + return Value::ToUniquePtrValue(ParseJson(json)); +} + +} // namespace test +} // namespace base diff --git a/chromium/base/test/values_test_util.h b/chromium/base/test/values_test_util.h new file mode 100644 index 00000000000..7f48bfd3898 --- /dev/null +++ b/chromium/base/test/values_test_util.h @@ -0,0 +1,105 @@ +// Copyright (c) 2012 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_VALUES_TEST_UTIL_H_ +#define BASE_TEST_VALUES_TEST_UTIL_H_ + +#include <iosfwd> +#include <memory> +#include <string> + +#include "base/strings/string_piece.h" +#include "base/values.h" +#include "testing/gmock/include/gmock/gmock-matchers.h" + +namespace base { + +// All the functions below expect that the value for the given key in +// the given dictionary equals the given expected value. + +void ExpectDictBooleanValue(bool expected_value, + const DictionaryValue& value, + const std::string& key); + +void ExpectDictDictionaryValue(const DictionaryValue& expected_value, + const DictionaryValue& value, + const std::string& key); + +void ExpectDictIntegerValue(int expected_value, + const DictionaryValue& value, + const std::string& key); + +void ExpectDictListValue(const ListValue& expected_value, + const DictionaryValue& value, + const std::string& key); + +void ExpectDictStringValue(const std::string& expected_value, + const DictionaryValue& value, + const std::string& key); + +void ExpectStringValue(const std::string& expected_str, const Value& actual); + +namespace test { + +// A custom GMock matcher which matches if a base::Value is a dictionary which +// has a key |key| that is equal to |value|. +testing::Matcher<const base::Value&> DictionaryHasValue( + const std::string& key, + const base::Value& expected_value); + +// A custom GMock matcher which matches if a base::Value is a dictionary which +// contains all key/value pairs from |template_value|. +testing::Matcher<const base::Value&> DictionaryHasValues( + const base::Value& template_value); + +// A custom GMock matcher. 
For details, see +// https://github.com/google/googletest/blob/644319b9f06f6ca9bf69fe791be399061044bc3d/googlemock/docs/CookBook.md#writing-new-polymorphic-matchers +class IsJsonMatcher { + public: + explicit IsJsonMatcher(base::StringPiece json); + explicit IsJsonMatcher(const base::Value& value); + IsJsonMatcher(const IsJsonMatcher& other); + ~IsJsonMatcher(); + + bool MatchAndExplain(base::StringPiece json, + testing::MatchResultListener* listener) const; + bool MatchAndExplain(const base::Value& value, + testing::MatchResultListener* listener) const; + void DescribeTo(std::ostream* os) const; + void DescribeNegationTo(std::ostream* os) const; + + private: + IsJsonMatcher& operator=(const IsJsonMatcher& other) = delete; + + base::Value expected_value_; +}; + +// Creates a GMock matcher for testing equivalence of JSON values represented as +// either JSON strings or base::Value objects. Parsing of the expected value +// uses ParseJson(), which allows trailing commas for convenience. Parsing of +// the actual value follows the JSON spec strictly. +// +// Although it possible to use this matcher when the actual and expected values +// are both base::Value objects, there is no advantage in that case to using +// this matcher in place of GMock's normal equality semantics. +template <typename T> +inline testing::PolymorphicMatcher<IsJsonMatcher> IsJson(const T& value) { + return testing::MakePolymorphicMatcher(IsJsonMatcher(value)); +} + +// Parses |json| as JSON, allowing trailing commas, and returns the resulting +// value. If |json| fails to parse, causes an EXPECT failure and returns the +// Null Value. +Value ParseJson(StringPiece json); + +// DEPRECATED. +// Parses |json| as JSON, allowing trailing commas, and returns the +// resulting value. If the json fails to parse, causes an EXPECT +// failure and returns the Null Value (but never a NULL pointer). +std::unique_ptr<Value> ParseJsonDeprecated(StringPiece json); + +} // namespace test +} // namespace base + +#endif // BASE_TEST_VALUES_TEST_UTIL_H_ diff --git a/chromium/base/test/with_feature_override.cc b/chromium/base/test/with_feature_override.cc new file mode 100644 index 00000000000..7a9d78705ab --- /dev/null +++ b/chromium/base/test/with_feature_override.cc @@ -0,0 +1,32 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/test/with_feature_override.h" +#include "base/task/thread_pool/thread_pool_instance.h" + +namespace base { +namespace test { + +WithFeatureOverride::WithFeatureOverride(const base::Feature& feature) { + // Most other classes that tests inherit from start task environments. Verify + // that has not happened yet. 
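+  // A non-null ThreadPoolInstance here implies a TaskEnvironment (or similar
+  // fixture) already exists, which is too late for the override to apply.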
+ DCHECK(base::ThreadPoolInstance::Get() == nullptr) + << "WithFeatureOverride should be the first class a test inherits from " + "so it sets the features before any other setup is done."; + + if (GetParam()) { + scoped_feature_list_.InitAndEnableFeature(feature); + } else { + scoped_feature_list_.InitAndDisableFeature(feature); + } +} + +bool WithFeatureOverride::IsParamFeatureEnabled() { + return GetParam(); +} + +WithFeatureOverride::~WithFeatureOverride() = default; + +} // namespace test +} // namespace base diff --git a/chromium/base/test/with_feature_override.h b/chromium/base/test/with_feature_override.h new file mode 100644 index 00000000000..9a88253c30c --- /dev/null +++ b/chromium/base/test/with_feature_override.h @@ -0,0 +1,55 @@ +// Copyright 2020 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_TEST_WITH_FEATURE_OVERRIDE_H_ +#define BASE_TEST_WITH_FEATURE_OVERRIDE_H_ + +#include "base/feature_list.h" +#include "base/macros.h" +#include "base/test/scoped_feature_list.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace test { + +#define INSTANTIATE_FEATURE_OVERRIDE_TEST_SUITE(test_name) \ + INSTANTIATE_TEST_SUITE_P(All, test_name, testing::Values(false, true)) + +// Base class for a test fixture that must run with a feature enabled and +// disabled. Must be the first base class of the test fixture to take effect +// during the construction of the test fixture itself. +// +// Example usage: +// +// class MyTest : public base::WithFeatureOverride, public testing::Test { +// public: +// MyTest() : WithFeatureOverride(kMyFeature){} +// }; +// +// TEST_P(MyTest, FooBar) { +// This will run with both the kMyFeature enabled and disabled. +// } +// +// INSTANTIATE_FEATURE_OVERRIDE_TEST_SUITE(MyTest); + +class WithFeatureOverride : public testing::WithParamInterface<bool> { + public: + explicit WithFeatureOverride(const base::Feature& feature); + ~WithFeatureOverride(); + + WithFeatureOverride(const WithFeatureOverride&) = delete; + WithFeatureOverride& operator=(const WithFeatureOverride&) = delete; + + // Use to know if the configured feature provided in the ctor is enabled or + // not. + bool IsParamFeatureEnabled(); + + private: + base::test::ScopedFeatureList scoped_feature_list_; +}; + +} // namespace test +} // namespace base + +#endif // BASE_TEST_WITH_FEATURE_OVERRIDE_H_ diff --git a/chromium/base/third_party/nspr/prtime.cc b/chromium/base/third_party/nspr/prtime.cc index c125160de69..fe96724ac9a 100644 --- a/chromium/base/third_party/nspr/prtime.cc +++ b/chromium/base/third_party/nspr/prtime.cc @@ -72,6 +72,7 @@ #include "build/build_config.h" #include <errno.h> /* for EINVAL */ +#include <string.h> #include <time.h> /* @@ -1018,7 +1019,7 @@ PR_ParseTimeString( /* "-" is ignored at the beginning of a token if we have not yet parsed a year (e.g., the second "-" in "30-AUG-1966"), or if - the character after the dash is not a digit. */ + the character after the dash is not a digit. 
*/ if (*rest == '-' && ((rest > string && isalpha((unsigned char)rest[-1]) && year < 0) || rest[1] < '0' || rest[1] > '9')) @@ -1139,7 +1140,7 @@ PR_ParseTimeString( /* * mktime will return (time_t) -1 if the input is a date * after 23:59:59, December 31, 3000, US Pacific Time (not - * UTC as documented): + * UTC as documented): * http://msdn.microsoft.com/en-us/library/d1y53h2a(VS.80).aspx * But if the year is 3001, mktime also invokes the invalid * parameter handler, causing the application to crash. This @@ -1167,7 +1168,7 @@ PR_ParseTimeString( return PR_SUCCESS; } } - + /* So mktime() can't handle this case. We assume the zone_offset for the date we are parsing is the same as the zone offset on 00:00:00 2 Jan 1970 GMT. */ diff --git a/chromium/base/threading/hang_watcher.cc b/chromium/base/threading/hang_watcher.cc index 2181e7e408e..9a4cddea653 100644 --- a/chromium/base/threading/hang_watcher.cc +++ b/chromium/base/threading/hang_watcher.cc @@ -10,22 +10,28 @@ #include "base/bind.h" #include "base/callback_helpers.h" +#include "base/debug/alias.h" +#include "base/debug/crash_logging.h" #include "base/debug/dump_without_crashing.h" #include "base/feature_list.h" #include "base/no_destructor.h" +#include "base/strings/string_number_conversions.h" #include "base/synchronization/lock.h" #include "base/synchronization/waitable_event.h" +#include "base/threading/platform_thread.h" #include "base/threading/thread_checker.h" #include "base/threading/thread_restrictions.h" +#include "base/time/default_tick_clock.h" #include "base/time/time.h" +#include "build/build_config.h" namespace base { // static -const base::Feature HangWatcher::kEnableHangWatcher{ +constexpr base::Feature HangWatcher::kEnableHangWatcher{ "EnableHangWatcher", base::FEATURE_DISABLED_BY_DEFAULT}; -const base::TimeDelta HangWatchScope::kDefaultHangWatchTime = +constexpr base::TimeDelta HangWatchScope::kDefaultHangWatchTime = base::TimeDelta::FromSeconds(10); namespace { @@ -46,6 +52,8 @@ HangWatchScope::HangWatchScope(TimeDelta timeout) { internal::HangWatchState* current_hang_watch_state = internal::HangWatchState::GetHangWatchStateForCurrentThread()->Get(); + DCHECK(timeout >= base::TimeDelta()) << "Negative timeouts are invalid."; + // TODO(crbug.com/1034046): Remove when all threads using HangWatchScope are // monitored. Thread is not monitored, noop. if (!current_hang_watch_state) { @@ -98,13 +106,13 @@ HangWatchScope::~HangWatchScope() { // and that went undetected by the HangWatcher. } -HangWatcher::HangWatcher(RepeatingClosure on_hang_closure) +HangWatcher::HangWatcher() : monitor_period_(kMonitoringPeriod), should_monitor_(WaitableEvent::ResetPolicy::AUTOMATIC), - on_hang_closure_(std::move(on_hang_closure)), - thread_(this, kThreadName) { + thread_(this, kThreadName), + tick_clock_(base::DefaultTickClock::GetInstance()) { // |thread_checker_| should not be bound to the constructing thread. - DETACH_FROM_THREAD(thread_checker_); + DETACH_FROM_THREAD(hang_watcher_thread_checker_); should_monitor_.declare_only_used_while_idle(); @@ -135,10 +143,63 @@ bool HangWatcher::IsWatchListEmpty() { return watch_states_.empty(); } +void HangWatcher::Wait() { + while (true) { + // Amount by which the actual time spent sleeping can deviate from + // the target time and still be considered timely. + constexpr base::TimeDelta wait_drift_tolerance = + base::TimeDelta::FromMilliseconds(100); + + base::TimeTicks time_before_wait = tick_clock_->NowTicks(); + + // Sleep until next scheduled monitoring or until signaled. 
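+    // TimedWait() returns true if |should_monitor_| was signaled before the
+    // full |monitor_period_| elapsed.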
+ bool was_signaled = should_monitor_.TimedWait(monitor_period_); + + if (after_wait_callback_) { + after_wait_callback_.Run(time_before_wait); + } + + base::TimeTicks time_after_wait = tick_clock_->NowTicks(); + base::TimeDelta wait_time = time_after_wait - time_before_wait; + bool wait_was_normal = + wait_time <= (monitor_period_ + wait_drift_tolerance); + + if (!wait_was_normal) { + // If the time spent waiting was too high it might indicate the machine is + // very slow or that that it went to sleep. In any case we can't trust the + // hang watch scopes that are currently live. Update the ignore threshold + // to make sure they don't trigger a hang on subsequent monitors then keep + // waiting. + + base::AutoLock auto_lock(watch_state_lock_); + + // Find the latest deadline among the live watch states. They might change + // atomically while iterating but that's fine because if they do that + // means the new HangWatchScope was constructed very soon after the + // abnormal sleep happened and might be affected by the root cause still. + // Ignoring it is cautious and harmless. + base::TimeTicks latest_deadline; + for (const auto& state : watch_states_) { + base::TimeTicks deadline = state->GetDeadline(); + if (deadline > latest_deadline) { + latest_deadline = deadline; + } + } + + deadline_ignore_threshold_ = latest_deadline; + } + + // Stop waiting. + if (wait_was_normal || was_signaled) { + return; + } + } +} + void HangWatcher::Run() { // Monitor() should only run on |thread_|. Bind |thread_checker_| here to make // sure of that. - DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_); while (keep_monitoring_.load(std::memory_order_relaxed)) { // If there is nothing to watch sleep until there is. @@ -146,11 +207,14 @@ void HangWatcher::Run() { should_monitor_.Wait(); } else { Monitor(); + + if (after_monitor_closure_for_testing_) { + after_monitor_closure_for_testing_.Run(); + } } if (keep_monitoring_.load(std::memory_order_relaxed)) { - // Sleep until next scheduled monitoring. - should_monitor_.TimedWait(monitor_period_); + Wait(); } } } @@ -163,12 +227,9 @@ HangWatcher* HangWatcher::GetInstance() { // static void HangWatcher::RecordHang() { base::debug::DumpWithoutCrashing(); - - // Defining |inhibit_tail_call_optimization| *after* calling - // DumpWithoutCrashing() prevents tail call optimization from omitting this - // function's address on the stack. - volatile int inhibit_tail_call_optimization = __LINE__; - ALLOW_UNUSED_LOCAL(inhibit_tail_call_optimization); + // Inhibit code folding. + const int line_number = __LINE__; + base::debug::Alias(&line_number); } ScopedClosureRunner HangWatcher::RegisterThread() { @@ -186,49 +247,194 @@ ScopedClosureRunner HangWatcher::RegisterThread() { Unretained(HangWatcher::GetInstance()))); } -void HangWatcher::Monitor() { - DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); +base::TimeTicks HangWatcher::WatchStateSnapShot::GetHighestDeadline() const { + DCHECK(!hung_watch_state_copies_.empty()); + // Since entries are sorted in increasing order the last entry is the largest + // one. 
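+  // (The sorting is done by WatchStateSnapShot's constructor, in order of
+  // increasing deadline.)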
+ return hung_watch_state_copies_.back().deadline; +} - bool must_invoke_hang_closure = false; - { - AutoLock auto_lock(watch_state_lock_); - for (const auto& watch_state : watch_states_) { - if (watch_state->IsOverDeadline()) { - must_invoke_hang_closure = true; - break; - } +HangWatcher::WatchStateSnapShot::WatchStateSnapShot( + const HangWatchStates& watch_states, + base::TimeTicks snapshot_time, + base::TimeTicks deadline_ignore_threshold) + : snapshot_time_(snapshot_time) { + // Initial copy of the values. + for (const auto& watch_state : watch_states) { + base::TimeTicks deadline = watch_state.get()->GetDeadline(); + + if (deadline <= deadline_ignore_threshold) { + hung_watch_state_copies_.clear(); + return; + } + + // Only copy hung threads. + if (deadline <= snapshot_time) { + hung_watch_state_copies_.push_back( + WatchStateCopy{deadline, watch_state.get()->GetThreadID()}); } } - if (must_invoke_hang_closure) { - capture_in_progress.store(true, std::memory_order_relaxed); - base::AutoLock scope_lock(capture_lock_); + // Sort |hung_watch_state_copies_| by order of decreasing hang severity so the + // most severe hang is first in the list. + std::sort(hung_watch_state_copies_.begin(), hung_watch_state_copies_.end(), + [](const WatchStateCopy& lhs, const WatchStateCopy& rhs) { + return lhs.deadline < rhs.deadline; + }); +} - // Invoke the closure outside the scope of |watch_state_lock_| - // to prevent lock reentrancy. - on_hang_closure_.Run(); +HangWatcher::WatchStateSnapShot::WatchStateSnapShot( + const WatchStateSnapShot& other) = default; - capture_in_progress.store(false, std::memory_order_relaxed); - } +HangWatcher::WatchStateSnapShot::~WatchStateSnapShot() = default; - if (after_monitor_closure_for_testing_) { - after_monitor_closure_for_testing_.Run(); +std::string HangWatcher::WatchStateSnapShot::PrepareHungThreadListCrashKey() + const { + // Build a crash key string that contains the ids of the hung threads. + constexpr char kSeparator{'|'}; + std::string list_of_hung_thread_ids; + + // Add as many thread ids to the crash key as possible. + for (const WatchStateCopy& copy : hung_watch_state_copies_) { + std::string fragment = base::NumberToString(copy.thread_id) + kSeparator; + if (list_of_hung_thread_ids.size() + fragment.size() < + static_cast<std::size_t>(debug::CrashKeySize::Size256)) { + list_of_hung_thread_ids += fragment; + } else { + // Respect the by priority ordering of thread ids in the crash key by + // stopping the construction as soon as one does not fit. This avoids + // including lesser priority ids while omitting more important ones. + break; + } } + + return list_of_hung_thread_ids; +} + +HangWatcher::WatchStateSnapShot HangWatcher::GrabWatchStateSnapshotForTesting() + const { + WatchStateSnapShot snapshot(watch_states_, base::TimeTicks::Now(), + deadline_ignore_threshold_); + return snapshot; +} + +void HangWatcher::Monitor() { + DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_); + AutoLock auto_lock(watch_state_lock_); + + // If all threads unregistered since this function was invoked there's + // nothing to do anymore. + if (watch_states_.empty()) + return; + + const base::TimeTicks now = base::TimeTicks::Now(); + + // See if any thread hung. We're holding |watch_state_lock_| so threads + // can't register or unregister but their deadline still can change + // atomically. This is fine. 
Detecting a hang is generally best effort and
+  // if a thread resumes from hang in the time it takes to move on to
+  // capturing then its ID will be absent from the crash keys.
+  bool any_thread_hung = std::any_of(
+      watch_states_.cbegin(), watch_states_.cend(),
+      [this, now](const std::unique_ptr<internal::HangWatchState>& state) {
+        base::TimeTicks deadline = state->GetDeadline();
+        return deadline > deadline_ignore_threshold_ && deadline < now;
+      });
+
+  // If at least one thread is hung we need to capture.
+  if (any_thread_hung)
+    CaptureHang(now);
+}
+
+void HangWatcher::CaptureHang(base::TimeTicks capture_time) {
+  capture_in_progress.store(true, std::memory_order_relaxed);
+  base::AutoLock scope_lock(capture_lock_);
+
+  WatchStateSnapShot watch_state_snapshot(watch_states_, capture_time,
+                                          deadline_ignore_threshold_);
+
+  // The hung thread(s) detected at the start of Monitor() could have moved on
+  // from their scopes. If that happened and there are no more hung threads
+  // then abort capture.
+  std::string list_of_hung_thread_ids =
+      watch_state_snapshot.PrepareHungThreadListCrashKey();
+  if (list_of_hung_thread_ids.empty())
+    return;
+
+#if not defined(OS_NACL)
+  static debug::CrashKeyString* crash_key = AllocateCrashKeyString(
+      "list-of-hung-threads", debug::CrashKeySize::Size256);
+  debug::ScopedCrashKeyString list_of_hung_threads_crash_key_string(
+      crash_key, list_of_hung_thread_ids);
+#endif
+
+  // To avoid capturing more than one hang that blames a subset of the same
+  // threads it's necessary to keep track of the furthest deadline that
+  // contributed to declaring a hang. Only once all threads have deadlines past
+  // this point can we be sure that a newly discovered hang is not directly
+  // related.
+  // Example:
+  // **********************************************************************
+  // Timeline A : L------1-------2----------3-------4----------N-----------
+  // Timeline B : -------2----------3-------4----------L----5------N-------
+  // Timeline C : L----------------------------5------6----7---8------9---N
+  // **********************************************************************
+  // In the example when a Monitor() happens during timeline A
+  // |deadline_ignore_threshold_| (L) is at time zero and deadlines (1-4)
+  // are before Now() (N). A hang is captured and L is updated. During
+  // the next Monitor() (timeline B) a new deadline is over but we can't
+  // capture a hang because deadlines 2-4 are still live and already counted
+  // toward a hang. During a third monitor (timeline C) all live deadlines
+  // are now after L and a second hang can be recorded.
+  base::TimeTicks latest_expired_deadline =
+      watch_state_snapshot.GetHighestDeadline();
+
+  if (on_hang_closure_for_testing_)
+    on_hang_closure_for_testing_.Run();
+  else
+    RecordHang();
+
+  // Update after running the actual capture.
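+  // Deadlines at or before this threshold were just blamed for a hang and
+  // will be ignored by subsequent calls to Monitor().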
+ deadline_ignore_threshold_ = latest_expired_deadline; + + capture_in_progress.store(false, std::memory_order_relaxed); } void HangWatcher::SetAfterMonitorClosureForTesting( base::RepeatingClosure closure) { + DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_); after_monitor_closure_for_testing_ = std::move(closure); } +void HangWatcher::SetOnHangClosureForTesting(base::RepeatingClosure closure) { + DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_); + on_hang_closure_for_testing_ = std::move(closure); +} + void HangWatcher::SetMonitoringPeriodForTesting(base::TimeDelta period) { + DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_); monitor_period_ = period; } +void HangWatcher::SetAfterWaitCallbackForTesting( + RepeatingCallback<void(TimeTicks)> callback) { + DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_); + after_wait_callback_ = callback; +} + void HangWatcher::SignalMonitorEventForTesting() { + DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_); should_monitor_.Signal(); } +void HangWatcher::StopMonitoringForTesting() { + keep_monitoring_.store(false, std::memory_order_relaxed); +} + +void HangWatcher::SetTickClockForTesting(const base::TickClock* tick_clock) { + tick_clock_ = tick_clock; +} + void HangWatcher::BlockIfCaptureInProgress() { // Makes a best-effort attempt to block execution if a hang is currently being // captured.Only block on |capture_lock| if |capture_in_progress| hints that @@ -262,7 +468,7 @@ namespace internal { // |deadline_| starts at Max() to avoid validation problems // when setting the first legitimate value. -HangWatchState::HangWatchState() { +HangWatchState::HangWatchState() : thread_id_(PlatformThread::CurrentId()) { // There should not exist a state object for this thread already. DCHECK(!GetHangWatchStateForCurrentThread()->Get()); @@ -330,6 +536,10 @@ HangWatchState::GetHangWatchStateForCurrentThread() { return hang_watch_state.get(); } +PlatformThreadId HangWatchState::GetThreadID() const { + return thread_id_; +} + } // namespace internal } // namespace base diff --git a/chromium/base/threading/hang_watcher.h b/chromium/base/threading/hang_watcher.h index 9b637b7dcf9..dd496c79f7d 100644 --- a/chromium/base/threading/hang_watcher.h +++ b/chromium/base/threading/hang_watcher.h @@ -11,12 +11,17 @@ #include "base/atomicops.h" #include "base/callback.h" +#include "base/callback_forward.h" #include "base/callback_helpers.h" +#include "base/compiler_specific.h" #include "base/feature_list.h" #include "base/synchronization/lock.h" +#include "base/thread_annotations.h" +#include "base/threading/platform_thread.h" #include "base/threading/simple_thread.h" #include "base/threading/thread_checker.h" #include "base/threading/thread_local.h" +#include "base/time/tick_clock.h" #include "base/time/time.h" namespace base { @@ -87,7 +92,7 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate { // The first invocation of the constructor will set the global instance // accessible through GetInstance(). This means that only one instance can // exist at a time. - explicit HangWatcher(RepeatingClosure on_hang_closure); + HangWatcher(); // Clears the global instance for the class. ~HangWatcher() override; @@ -98,10 +103,6 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate { // Returns a non-owning pointer to the global HangWatcher instance. 
static HangWatcher* GetInstance(); - // Invoke base::debug::DumpWithoutCrashing() insuring that the stack frame - // right under it in the trace belongs to HangWatcher for easier attribution. - NOINLINE static void RecordHang(); - // Sets up the calling thread to be monitored for threads. Returns a // ScopedClosureRunner that unregisters the thread. This closure has to be // called from the registered thread before it's joined. @@ -109,29 +110,106 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate { LOCKS_EXCLUDED(watch_state_lock_) WARN_UNUSED_RESULT; // Choose a closure to be run at the end of each call to Monitor(). Use only - // for testing. + // for testing. Reentering the HangWatcher in the closure must be done with + // care. It should only be done through certain testing functions because + // deadlocks are possible. void SetAfterMonitorClosureForTesting(base::RepeatingClosure closure); + // Choose a closure to be run instead of recording the hang. Used to test + // that certain conditions hold true at the time of recording. Use only + // for testing. Reentering the HangWatcher in the closure must be done with + // care. It should only be done through certain testing functions because + // deadlocks are possible. + void SetOnHangClosureForTesting(base::RepeatingClosure closure); + // Set a monitoring period other than the default. Use only for // testing. void SetMonitoringPeriodForTesting(base::TimeDelta period); + // Choose a callback to invoke right after waiting to monitor in Wait(). Use + // only for testing. + void SetAfterWaitCallbackForTesting( + RepeatingCallback<void(TimeTicks)> callback); + // Force the monitoring loop to resume and evaluate whether to continue. // This can trigger a call to Monitor() or not depending on why the // HangWatcher thread is sleeping. Use only for testing. void SignalMonitorEventForTesting(); + // Call to make sure no more monitoring takes place. The + // function is thread-safe and can be called at anytime but won't stop + // monitoring that is currently taking place. Use only for testing. + void StopMonitoringForTesting(); + + // Replace the clock used when calculating time spent + // sleeping. Use only for testing. + void SetTickClockForTesting(const base::TickClock* tick_clock); + // Use to block until the hang is recorded. Allows the caller to halt // execution so it does not overshoot the hang watch target and result in a // non-actionable stack trace in the crash recorded. void BlockIfCaptureInProgress(); private: - THREAD_CHECKER(thread_checker_); + // Use to assert that functions are called on the monitoring thread. + THREAD_CHECKER(hang_watcher_thread_checker_); + + // Use to assert that functions are called on the constructing thread. + THREAD_CHECKER(constructing_thread_checker_); + + // Invoke base::debug::DumpWithoutCrashing() insuring that the stack frame + // right under it in the trace belongs to HangWatcher for easier attribution. + NOINLINE static void RecordHang(); + + using HangWatchStates = + std::vector<std::unique_ptr<internal::HangWatchState>>; + + // Used to save a snapshots of the state of hang watching during capture. + // Only the state of hung threads is retained. + class BASE_EXPORT WatchStateSnapShot { + public: + struct WatchStateCopy { + base::TimeTicks deadline; + base::PlatformThreadId thread_id; + }; + + // Construct the snapshot from provided data. |snapshot_time| can be + // different than now() to be coherent with other operations recently done + // on |watch_states|. 
If any deadline in |watch_states| is before + // |deadline_ignore_threshold|, the snapshot is empty. + WatchStateSnapShot(const HangWatchStates& watch_states, + base::TimeTicks snapshot_time, + base::TimeTicks deadline_ignore_threshold); + WatchStateSnapShot(const WatchStateSnapShot& other); + ~WatchStateSnapShot(); + + // Returns a string that contains the ids of the hung threads separated by a + // '|'. The size of the string is capped at debug::CrashKeySize::Size256. If + // no threads are hung returns an empty string. + std::string PrepareHungThreadListCrashKey() const; + + // Return the highest deadline included in this snapshot. + base::TimeTicks GetHighestDeadline() const; + + private: + base::TimeTicks snapshot_time_; + std::vector<WatchStateCopy> hung_watch_state_copies_; + }; + + // Return a watch state snapshot taken Now() to be inspected in tests. + // NO_THREAD_SAFETY_ANALYSIS is needed because the analyzer can't figure out + // that calls to this function done from |on_hang_closure_| are properly + // locked. + WatchStateSnapShot GrabWatchStateSnapshotForTesting() const + NO_THREAD_SAFETY_ANALYSIS; // Inspects the state of all registered threads to check if they are hung and // invokes the appropriate closure if so. - void Monitor(); + void Monitor() LOCKS_EXCLUDED(watch_state_lock_); + + // Record the hang and perform the necessary housekeeping before and after. + void CaptureHang(base::TimeTicks capture_time) + EXCLUSIVE_LOCKS_REQUIRED(watch_state_lock_) LOCKS_EXCLUDED(capture_lock_); // Call Run() on the HangWatcher thread. void Start(); @@ -139,6 +217,9 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate { // Stop all monitoring and join the HangWatcher thread. void Stop(); + // Wait until it's time to monitor. + void Wait(); + // Run the loop that periodically monitors the registered thread at a // set time interval. void Run() override; @@ -158,7 +239,6 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate { // watch list. void UnregisterThread() LOCKS_EXCLUDED(watch_state_lock_); - const RepeatingClosure on_hang_closure_; Lock watch_state_lock_; std::vector<std::unique_ptr<internal::HangWatchState>> watch_states_ @@ -166,12 +246,21 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate { base::DelegateSimpleThread thread_; - base::RepeatingClosure after_monitor_closure_for_testing_; + RepeatingClosure after_monitor_closure_for_testing_; + RepeatingClosure on_hang_closure_for_testing_; + RepeatingCallback<void(TimeTicks)> after_wait_callback_; - base::Lock capture_lock_; + base::Lock capture_lock_ ACQUIRED_AFTER(watch_state_lock_); std::atomic<bool> capture_in_progress{false}; + const base::TickClock* tick_clock_; + + // The time after which all deadlines in |watch_states_| need to be for a hang + // to be reported. + base::TimeTicks deadline_ignore_threshold_; + FRIEND_TEST_ALL_PREFIXES(HangWatcherTest, NestedScopes); + FRIEND_TEST_ALL_PREFIXES(HangWatcherSnapshotTest, HungThreadIDs); }; // Classes here are exposed in the header only for testing. They are not @@ -205,7 +294,7 @@ class BASE_EXPORT HangWatchState { // store the value. To test if the deadline has expired use IsOverDeadline(). TimeTicks GetDeadline() const; - // Atomically sets the deadline to a new value and return the previous value. + // Atomically sets the deadline to a new value. void SetDeadline(TimeTicks deadline); // Tests whether the associated thread's execution has gone over the deadline. 
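For orientation, here is a minimal usage sketch of the hang watching API declared above. It is illustrative only and not part of this change: it assumes the embedder has already constructed and started the global HangWatcher on another thread, and MonitoredThreadMain(), HasWork() and DoOneUnitOfWork() are hypothetical placeholders.

// Illustrative sketch, not part of this patch. Assumes the global HangWatcher
// was already created (and its monitoring thread started) elsewhere.
#include "base/callback_helpers.h"
#include "base/threading/hang_watcher.h"

bool HasWork();          // Hypothetical.
void DoOneUnitOfWork();  // Hypothetical.

void MonitoredThreadMain() {
  // Opt this thread into hang watching. The returned closure must be run on
  // this same thread before it is joined.
  base::ScopedClosureRunner unregister =
      base::HangWatcher::GetInstance()->RegisterThread();

  while (HasWork()) {
    // Each unit of work is covered by a scope. If the scope is still live
    // once its deadline has passed, the HangWatcher thread records a hang.
    base::HangWatchScope scope(base::HangWatchScope::kDefaultHangWatchTime);
    DoOneUnitOfWork();
  }

  // |unregister| runs here, removing this thread from the watch list.
}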
@@ -219,6 +308,8 @@ class BASE_EXPORT HangWatchState { HangWatchScope* GetCurrentHangWatchScope(); #endif + PlatformThreadId GetThreadID() const; + private: // The thread that creates the instance should be the class that updates // the deadline. @@ -228,6 +319,8 @@ class BASE_EXPORT HangWatchState { // reaches the value contained in it this constistutes a hang. std::atomic<TimeTicks> deadline_{base::TimeTicks::Max()}; + const PlatformThreadId thread_id_; + #if DCHECK_IS_ON() // Used to keep track of the current HangWatchScope and detect improper usage. // Scopes should always be destructed in reverse order from the one they were diff --git a/chromium/base/threading/hang_watcher_unittest.cc b/chromium/base/threading/hang_watcher_unittest.cc index 30c6352489b..4a6033fe235 100644 --- a/chromium/base/threading/hang_watcher_unittest.cc +++ b/chromium/base/threading/hang_watcher_unittest.cc @@ -3,16 +3,24 @@ // found in the LICENSE file. #include "base/threading/hang_watcher.h" +#include <atomic> #include <memory> +#include "base/barrier_closure.h" #include "base/bind.h" #include "base/callback.h" #include "base/callback_helpers.h" +#include "base/run_loop.h" +#include "base/strings/string_number_conversions.h" #include "base/synchronization/lock.h" #include "base/synchronization/waitable_event.h" #include "base/test/bind_test_util.h" +#include "base/test/simple_test_tick_clock.h" #include "base/test/task_environment.h" +#include "base/test/test_timeouts.h" #include "base/threading/platform_thread.h" +#include "base/threading/thread_checker.h" +#include "base/time/tick_clock.h" #include "base/time/time.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" @@ -20,22 +28,24 @@ namespace base { namespace { -const base::TimeDelta kTimeout = base::TimeDelta::FromSeconds(10); -const base::TimeDelta kHangTime = kTimeout + base::TimeDelta::FromSeconds(1); - // Waits on provided WaitableEvent before executing and signals when done. -class BlockingThread : public PlatformThread::Delegate { +class BlockingThread : public DelegateSimpleThread::Delegate { public: - explicit BlockingThread(base::WaitableEvent* unblock_thread) - : unblock_thread_(unblock_thread) {} + explicit BlockingThread(base::WaitableEvent* unblock_thread, + base::TimeDelta timeout) + : thread_(this, "BlockingThread"), + unblock_thread_(unblock_thread), + timeout_(timeout) {} + + ~BlockingThread() override = default; - void ThreadMain() override { + void Run() override { // (Un)Register the thread here instead of in ctor/dtor so that the action // happens on the right thread. base::ScopedClosureRunner unregister_closure = base::HangWatcher::GetInstance()->RegisterThread(); - HangWatchScope scope(kTimeout); + HangWatchScope scope(timeout_); wait_until_entered_scope_.Signal(); unblock_thread_->Wait(); @@ -44,11 +54,20 @@ class BlockingThread : public PlatformThread::Delegate { bool IsDone() { return run_event_.IsSignaled(); } - // Block until this thread registered itself for hang watching and has entered - // a HangWatchScope. - void WaitUntilScopeEntered() { wait_until_entered_scope_.Wait(); } + void StartAndWaitForScopeEntered() { + thread_.Start(); + // Block until this thread registered itself for hang watching and has + // entered a HangWatchScope. 
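+    // |wait_until_entered_scope_| is signaled from Run() right after the
+    // HangWatchScope is created on the new thread.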
+ wait_until_entered_scope_.Wait(); + } + + void Join() { thread_.Join(); } + + PlatformThreadId GetId() { return thread_.tid(); } private: + base::DelegateSimpleThread thread_; + // Will be signaled once the thread is properly registered for watching and // scope has been entered. WaitableEvent wait_until_entered_scope_; @@ -57,23 +76,65 @@ class BlockingThread : public PlatformThread::Delegate { WaitableEvent run_event_; base::WaitableEvent* const unblock_thread_; + + base::TimeDelta timeout_; }; class HangWatcherTest : public testing::Test { public: - HangWatcherTest() - : hang_watcher_(std::make_unique<HangWatcher>( - base::BindRepeating(&WaitableEvent::Signal, - base::Unretained(&hang_event_)))), - thread_(&unblock_thread_) { - hang_watcher_->SetAfterMonitorClosureForTesting(base::BindRepeating( + const base::TimeDelta kTimeout = base::TimeDelta::FromSeconds(10); + const base::TimeDelta kHangTime = kTimeout + base::TimeDelta::FromSeconds(1); + + HangWatcherTest() { + hang_watcher_.SetAfterMonitorClosureForTesting(base::BindRepeating( &WaitableEvent::Signal, base::Unretained(&monitor_event_))); - } - void SetUp() override { + hang_watcher_.SetOnHangClosureForTesting(base::BindRepeating( + &WaitableEvent::Signal, base::Unretained(&hang_event_))); + // We're not testing the monitoring loop behavior in this test so we want to // trigger monitoring manually. - hang_watcher_->SetMonitoringPeriodForTesting(base::TimeDelta::Max()); + hang_watcher_.SetMonitoringPeriodForTesting(base::TimeDelta::Max()); + } + + HangWatcherTest(const HangWatcherTest& other) = delete; + HangWatcherTest& operator=(const HangWatcherTest& other) = delete; + + protected: + // Used to wait for monitoring. Will be signaled by the HangWatcher thread and + // so needs to outlive it. + WaitableEvent monitor_event_; + + // Signaled from the HangWatcher thread when a hang is detected. Needs to + // outlive the HangWatcher thread. + WaitableEvent hang_event_; + + HangWatcher hang_watcher_; + + // Used exclusively for MOCK_TIME. No tasks will be run on the environment. + // Single threaded to avoid ThreadPool WorkerThreads registering. + test::SingleThreadTaskEnvironment task_environment_{ + test::TaskEnvironment::TimeSource::MOCK_TIME}; +}; + +class HangWatcherBlockingThreadTest : public HangWatcherTest { + public: + HangWatcherBlockingThreadTest() : thread_(&unblock_thread_, kTimeout) {} + + HangWatcherBlockingThreadTest(const HangWatcherBlockingThreadTest& other) = + delete; + HangWatcherBlockingThreadTest& operator=( + const HangWatcherBlockingThreadTest& other) = delete; + + protected: + void JoinThread() { + unblock_thread_.Signal(); + + // Thread is joinable since we signaled |unblock_thread_|. + thread_.Join(); + + // If thread is done then it signaled. + ASSERT_TRUE(thread_.IsDone()); } void StartBlockedThread() { @@ -82,9 +143,7 @@ class HangWatcherTest : public testing::Test { // Start the thread. It will block since |unblock_thread_| was not // signaled yet. - ASSERT_TRUE(PlatformThread::Create(0, &thread_, &handle)); - - thread_.WaitUntilScopeEntered(); + thread_.StartAndWaitForScopeEntered(); // Thread registration triggered a call to HangWatcher::Monitor() which // signaled |monitor_event_|. Reset it so it's ready for waiting later on. @@ -97,40 +156,16 @@ class HangWatcherTest : public testing::Test { ASSERT_FALSE(monitor_event_.IsSignaled()); // Triger a monitoring on HangWatcher thread and verify results. 
- hang_watcher_->SignalMonitorEventForTesting(); + hang_watcher_.SignalMonitorEventForTesting(); monitor_event_.Wait(); - unblock_thread_.Signal(); - - // Thread is joinable since we signaled |unblock_thread_|. - PlatformThread::Join(handle); - - // If thread is done then it signaled. - ASSERT_TRUE(thread_.IsDone()); + JoinThread(); } - protected: - // Used to wait for monitoring. Will be signaled by the HangWatcher thread and - // so needs to outlive it. - WaitableEvent monitor_event_; - - // Signaled from the HangWatcher thread when a hang is detected. Needs to - // outlive the HangWatcher thread. - WaitableEvent hang_event_; - - std::unique_ptr<HangWatcher> hang_watcher_; - - // Used exclusively for MOCK_TIME. No tasks will be run on the environment. - // Single threaded to avoid ThreadPool WorkerThreads registering. - test::SingleThreadTaskEnvironment task_environment_{ - test::TaskEnvironment::TimeSource::MOCK_TIME}; - // Used to unblock the monitored thread. Signaled from the test main thread. WaitableEvent unblock_thread_; - PlatformThreadHandle handle; BlockingThread thread_; - }; } // namespace @@ -188,7 +223,7 @@ TEST_F(HangWatcherTest, NestedScopes) { ASSERT_EQ(current_hang_watch_state->GetDeadline(), original_deadline); } -TEST_F(HangWatcherTest, Hang) { +TEST_F(HangWatcherBlockingThreadTest, Hang) { StartBlockedThread(); // Simulate hang. @@ -198,88 +233,302 @@ TEST_F(HangWatcherTest, Hang) { ASSERT_TRUE(hang_event_.IsSignaled()); } -TEST_F(HangWatcherTest, NoHang) { +TEST_F(HangWatcherBlockingThreadTest, NoHang) { StartBlockedThread(); MonitorHangsAndJoinThread(); ASSERT_FALSE(hang_event_.IsSignaled()); } -// |HangWatcher| relies on |WaitableEvent::TimedWait| to schedule monitoring -// which cannot be tested using MockTime. Some tests will have to actually wait -// in real time before observing results but the TimeDeltas used are chosen to -// minimize flakiness as much as possible. -class HangWatcherRealTimeTest : public testing::Test { +namespace { +class HangWatcherSnapshotTest : public testing::Test { public: - HangWatcherRealTimeTest() - : hang_watcher_(std::make_unique<HangWatcher>( - base::BindRepeating(&WaitableEvent::Signal, - base::Unretained(&hang_event_)))) {} + HangWatcherSnapshotTest() = default; + HangWatcherSnapshotTest(const HangWatcherSnapshotTest& other) = delete; + HangWatcherSnapshotTest& operator=(const HangWatcherSnapshotTest& other) = + delete; protected: - std::unique_ptr<HangWatcher> hang_watcher_; + void TriggerMonitorAndWaitForCompletion() { + monitor_event_.Reset(); + hang_watcher_.SignalMonitorEventForTesting(); + monitor_event_.Wait(); + } + + // Verify that a capture takes place and that at the time of the capture the + // list of hung thread ids is correct. + void TestIDList(const std::string& id_list) { + list_of_hung_thread_ids_during_capture_ = id_list; + TriggerMonitorAndWaitForCompletion(); + ASSERT_EQ(++reference_capture_count_, hang_capture_count_); + } + + // Verify that even if hang monitoring takes place no hangs are detected. + void ExpectNoCapture() { + int old_capture_count = hang_capture_count_; + TriggerMonitorAndWaitForCompletion(); + ASSERT_EQ(old_capture_count, hang_capture_count_); + } + + std::string ConcatenateThreadIds( + const std::vector<base::PlatformThreadId>& ids) const { + std::string result; + constexpr char kSeparator{'|'}; + + for (PlatformThreadId id : ids) { + result += base::NumberToString(id) + kSeparator; + } + + return result; + } + + // Will be signaled once monitoring took place. 
Marks the end of the test.
+  WaitableEvent monitor_event_;
+
+  const PlatformThreadId test_thread_id_ = PlatformThread::CurrentId();
+
+  // This is written to by the test main thread and read from the hang watching
+  // thread. It does not need to be protected because access to it is
+  // synchronized by always setting before triggering the execution of the
+  // reading code through HangWatcher::SignalMonitorEventForTesting().
+  std::string list_of_hung_thread_ids_during_capture_;
+
+  // This is written to from the hang watching thread and read by the test main
+  // thread. It does not need to be protected because access to it is
+  // synchronized by always reading after monitor_event_ has been signaled.
+  int hang_capture_count_ = 0;
+
+  // Increases at the same time as |hang_capture_count_| to test that capture
+  // actually took place.
+  int reference_capture_count_ = 0;
+
+  HangWatcher hang_watcher_;
+};
+}  // namespace
+
+// TODO(crbug.com/2193655): Test flaky on iPad.
+TEST_F(HangWatcherSnapshotTest, DISABLED_HungThreadIDs) {
+  // During hang capture the list of hung threads should be populated.
+  hang_watcher_.SetOnHangClosureForTesting(base::BindLambdaForTesting([this]() {
+    EXPECT_EQ(hang_watcher_.GrabWatchStateSnapshotForTesting()
+                  .PrepareHungThreadListCrashKey(),
+              list_of_hung_thread_ids_during_capture_);
+    ++hang_capture_count_;
+  }));
+
+  // When hang capture is over the list should be empty.
+  hang_watcher_.SetAfterMonitorClosureForTesting(
+      base::BindLambdaForTesting([this]() {
+        EXPECT_EQ(hang_watcher_.GrabWatchStateSnapshotForTesting()
+                      .PrepareHungThreadListCrashKey(),
+                  "");
+        monitor_event_.Signal();
+      }));
+
+  // Register the main test thread for hang watching.
+  auto unregister_thread_closure_ = hang_watcher_.RegisterThread();
+
+  BlockingThread blocking_thread(&monitor_event_, base::TimeDelta{});
+  blocking_thread.StartAndWaitForScopeEntered();
+  {
+    // Start a hang watch scope that expires right away. Ensures that
+    // the first monitor will detect a hang. This scope will naturally have a
+    // later deadline than the one in |blocking_thread_| since it was created
+    // after.
+    HangWatchScope expires_instantly(base::TimeDelta{});
+
+    // Hung thread list should contain the id of the blocking thread and then
+    // the id of the test main thread since that is the order of increasing
+    // deadline.
+    TestIDList(
+        ConcatenateThreadIds({blocking_thread.GetId(), test_thread_id_}));
+
+    // |expires_instantly| and the scope from |blocking_thread| are still live
+    // but already recorded so should be ignored.
+    ExpectNoCapture();
+
+    // Thread is joinable since we signaled |monitor_event_|. This closes the
+    // scope in |blocking_thread|.
+    blocking_thread.Join();
+
+    // |expires_instantly| is still live but already recorded so should be
+    // ignored.
+    ExpectNoCapture();
+  }
+
+  // All hang watch scopes are over. There should be no capture.
+  ExpectNoCapture();
+
+  // Once all recorded scopes are over creating a new one and monitoring will
+  // trigger a hang detection.
+  HangWatchScope expires_instantly(base::TimeDelta{});
+  TestIDList(ConcatenateThreadIds({test_thread_id_}));
+}
+
+namespace {
+
+// Determines how long the HangWatcher will wait between calls to
+// Monitor(). Choose a low value so that successive invocations happen fast.
+// This makes tests that wait for monitoring run fast and makes tests that
+// expect no monitoring fail fast.
+const base::TimeDelta kMonitoringPeriod = base::TimeDelta::FromMilliseconds(1); + +// Test if and how often the HangWatcher periodically monitors for hangs. +class HangWatcherPeriodicMonitoringTest : public testing::Test { + public: + HangWatcherPeriodicMonitoringTest() { + hang_watcher_.SetMonitoringPeriodForTesting(kMonitoringPeriod); + hang_watcher_.SetOnHangClosureForTesting(base::BindRepeating( + &WaitableEvent::Signal, base::Unretained(&hang_event_))); + + // HangWatcher uses a TickClock to detect how long it slept in between calls + // to Monitor(). Override that clock to control its subjective passage of + // time. + hang_watcher_.SetTickClockForTesting(&test_clock_); + } + + HangWatcherPeriodicMonitoringTest( + const HangWatcherPeriodicMonitoringTest& other) = delete; + HangWatcherPeriodicMonitoringTest& operator=( + const HangWatcherPeriodicMonitoringTest& other) = delete; + + protected: + // Setup the callback invoked after waiting in HangWatcher to advance the + // tick clock by the desired time delta. + void InstallAfterWaitCallback(base::TimeDelta time_delta) { + hang_watcher_.SetAfterWaitCallbackForTesting(base::BindLambdaForTesting( + [this, time_delta](base::TimeTicks time_before_wait) { + test_clock_.Advance(time_delta); + })); + } + + base::SimpleTestTickClock test_clock_; + + // Single threaded to avoid ThreadPool WorkerThreads registering. Will run + // delayed tasks created by the tests. + test::SingleThreadTaskEnvironment task_environment_; + + std::unique_ptr<base::TickClock> fake_tick_clock_; + HangWatcher hang_watcher_; // Signaled when a hang is detected. WaitableEvent hang_event_; - std::atomic<int> monitor_count_{0}; - base::ScopedClosureRunner unregister_thread_closure_; }; +} // namespace -// TODO(https://crbug.com/1064116): Fix this test not to rely on timely task -// execution, which results in flakiness on slower bots. -TEST_F(HangWatcherRealTimeTest, DISABLED_PeriodicCallsCount) { - // These values are chosen to execute fast enough while running the unit tests - // but be large enough to buffer against clock precision problems. - const base::TimeDelta kMonitoringPeriod( - base::TimeDelta::FromMilliseconds(100)); - const base::TimeDelta kExecutionTime = kMonitoringPeriod * 5; +// Don't register any threads for hang watching. HangWatcher should not monitor. +TEST_F(HangWatcherPeriodicMonitoringTest, + NoPeriodicMonitoringWithoutRegisteredThreads) { + RunLoop run_loop; + + // If a call to HangWatcher::Monitor() takes place the test will instantly + // fail. + hang_watcher_.SetAfterMonitorClosureForTesting( + base::BindLambdaForTesting([&run_loop]() { + ADD_FAILURE() << "Monitoring took place!"; + run_loop.Quit(); + })); + + // Make the HangWatcher tick clock advance by exactly the monitoring period + // after waiting so it will never detect oversleeping between attempts to call + // Monitor(). This would inhibit monitoring and make the test pass for the + // wrong reasons. + InstallAfterWaitCallback(kMonitoringPeriod); + + // Unblock the test thread. No thread ever registered after the HangWatcher + // was created in the test's constructor. No monitoring should have taken + // place. + task_environment_.GetMainThreadTaskRunner()->PostDelayedTask( + FROM_HERE, run_loop.QuitClosure(), TestTimeouts::tiny_timeout()); + run_loop.Run(); + + // NOTE: + // A lack of calls could technically also be caused by the HangWatcher thread + // executing too slowly / being descheduled. This is a known limitation. 
+ // It's expected for |TestTimeouts::tiny_timeout()| to be large enough that + // this is rare. +} +// During normal execution periodic monitorings should take place. +TEST_F(HangWatcherPeriodicMonitoringTest, PeriodicCallsTakePlace) { // HangWatcher::Monitor() will run once right away on thread registration. - // We want to make sure it runs at least once more from being scheduled. - constexpr int kMinimumMonitorCount = 2; - - // Some amount of extra monitoring can happen but it has to be of the right - // order of magnitude. Otherwise it could indicate a problem like some code - // signaling the Thread to wake up excessivelly. - const int kMaximumMonitorCount = 2 * (kExecutionTime / kMonitoringPeriod); + // We want to make sure it runs at a couple more times from being scheduled. + constexpr int kMinimumMonitorCount = 3; - auto increment_monitor_count = [this]() { ++monitor_count_; }; + RunLoop run_loop; - hang_watcher_->SetMonitoringPeriodForTesting(kMonitoringPeriod); - hang_watcher_->SetAfterMonitorClosureForTesting( - base::BindLambdaForTesting(increment_monitor_count)); + // Setup the HangWatcher to unblock run_loop when the Monitor() has been + // invoked enough times. + hang_watcher_.SetAfterMonitorClosureForTesting(BarrierClosure( + kMinimumMonitorCount, base::BindLambdaForTesting([&run_loop, this]() { + // Test condition are confirmed, stop monitoring. + hang_watcher_.StopMonitoringForTesting(); - hang_event_.TimedWait(kExecutionTime); + // Unblock the test main thread. + run_loop.Quit(); + }))); - // No thread ever registered so no monitoring took place at all. - ASSERT_EQ(monitor_count_.load(), 0); + // Make the HangWatcher tick clock advance by exactly the monitoring period + // after waiting so it will never detect oversleeping between attempts to call + // Monitor(). This would inhibit monitoring. + InstallAfterWaitCallback(kMonitoringPeriod); - unregister_thread_closure_ = hang_watcher_->RegisterThread(); + // Register a thread, kicks off monitoring. + unregister_thread_closure_ = hang_watcher_.RegisterThread(); - hang_event_.TimedWait(kExecutionTime); - - ASSERT_GE(monitor_count_.load(), kMinimumMonitorCount); - ASSERT_LE(monitor_count_.load(), kMaximumMonitorCount); + run_loop.Run(); // No monitored scope means no possible hangs. ASSERT_FALSE(hang_event_.IsSignaled()); } +// If the HangWatcher detects it slept for longer than expected it will not +// monitor. +// TODO(crbug.com/1081654): Test flaky on ChromeOS. +TEST_F(HangWatcherPeriodicMonitoringTest, DISABLED_NoMonitorOnOverSleep) { + RunLoop run_loop; + + // If a call to HangWatcher::Monitor() takes place the test will instantly + // fail. + hang_watcher_.SetAfterMonitorClosureForTesting( + base::BindLambdaForTesting([&run_loop]() { + ADD_FAILURE() << "Monitoring took place!"; + run_loop.Quit(); + })); + + // Make the HangWatcher tick clock advance so much after waiting that it will + // detect oversleeping every time. This will keep it from monitoring. + InstallAfterWaitCallback(base::TimeDelta::FromMinutes(1)); + + // Register a thread, kicks off monitoring. + unregister_thread_closure_ = hang_watcher_.RegisterThread(); + + // Unblock the test thread. All waits were perceived as oversleeping so all + // monitoring was inhibited. + task_environment_.GetMainThreadTaskRunner()->PostDelayedTask( + FROM_HERE, run_loop.QuitClosure(), TestTimeouts::tiny_timeout()); + run_loop.Run(); + + // NOTE: A lack of calls could technically also be caused by the HangWatcher + // thread executing too slowly / being descheduled. 
This is a known + // limitation. It's expected for |TestTimeouts::tiny_timeout()| to be large + // enough that this happens rarely. +} + +namespace { class HangWatchScopeBlockingTest : public testing::Test { public: - void SetUp() override { - // Start the HangWatcher. - hang_watcher_ = - std::make_unique<HangWatcher>(base::BindLambdaForTesting([&] { - capture_started_.Signal(); - // Simulate capturing that takes a long time. - PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100)); - completed_capture_ = true; - })); - - hang_watcher_->SetAfterMonitorClosureForTesting( + HangWatchScopeBlockingTest() { + hang_watcher_.SetOnHangClosureForTesting(base::BindLambdaForTesting([&] { + capture_started_.Signal(); + // Simulate capturing that takes a long time. + PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100)); + completed_capture_ = true; + })); + + hang_watcher_.SetAfterMonitorClosureForTesting( base::BindLambdaForTesting([&]() { // Simulate monitoring that takes a long time. PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100)); @@ -287,12 +536,16 @@ class HangWatchScopeBlockingTest : public testing::Test { })); // Make sure no periodic monitoring takes place. - hang_watcher_->SetMonitoringPeriodForTesting(base::TimeDelta::Max()); + hang_watcher_.SetMonitoringPeriodForTesting(base::TimeDelta::Max()); // Register the test main thread for hang watching. - unregister_thread_closure_ = hang_watcher_->RegisterThread(); + unregister_thread_closure_ = hang_watcher_.RegisterThread(); } + HangWatchScopeBlockingTest(const HangWatchScopeBlockingTest& other) = delete; + HangWatchScopeBlockingTest& operator=( + const HangWatchScopeBlockingTest& other) = delete; + void VerifyScopesDontBlock() { // Start a hang watch scope that cannot possibly cause a hang to be // detected. @@ -300,7 +553,7 @@ class HangWatchScopeBlockingTest : public testing::Test { HangWatchScope long_scope(base::TimeDelta::Max()); // Manually trigger a monitoring. - hang_watcher_->SignalMonitorEventForTesting(); + hang_watcher_.SignalMonitorEventForTesting(); // Execution has to continue freely here as no capture is in progress. } @@ -326,9 +579,10 @@ class HangWatchScopeBlockingTest : public testing::Test { // capture the accesses are serialized by the blocking in ~HangWatchScope(). bool completed_capture_ = false; - std::unique_ptr<HangWatcher> hang_watcher_; + HangWatcher hang_watcher_; base::ScopedClosureRunner unregister_thread_closure_; }; +} // namespace // Tests that execution is unimpeded by ~HangWatchScope() when no capture ever // takes place. @@ -342,21 +596,28 @@ TEST_F(HangWatchScopeBlockingTest, ScopeBlocksDuringCapture) { // Start a hang watch scope that expires in the past already. Ensures that the // first monitor will detect a hang. { - HangWatchScope already_over(base::TimeDelta::FromDays(-1)); + // Start a hang watch scope that expires immediately . Ensures that + // the first monitor will detect a hang. + BlockingThread blocking_thread(&capture_started_, + base::TimeDelta::FromMilliseconds(0)); + blocking_thread.StartAndWaitForScopeEntered(); // Manually trigger a monitoring. - hang_watcher_->SignalMonitorEventForTesting(); + hang_watcher_.SignalMonitorEventForTesting(); // Ensure that the hang capturing started. capture_started_.Wait(); // Execution will get stuck in this scope because execution does not escape // ~HangWatchScope() if a hang capture is under way. 
+
+ blocking_thread.Join();
 }
 // A hang was in progress so execution should have been blocked in
 // BlockWhileCaptureInProgress() until capture finishes.
 EXPECT_TRUE(completed_capture_);
+ completed_monitoring_.Wait();
 // Reset expectations
 completed_monitoring_.Reset();
diff --git a/chromium/base/threading/platform_thread_internal_posix.cc b/chromium/base/threading/platform_thread_internal_posix.cc
index 378a24d0d1d..459b2ccffc0 100644
--- a/chromium/base/threading/platform_thread_internal_posix.cc
+++ b/chromium/base/threading/platform_thread_internal_posix.cc
@@ -5,7 +5,7 @@
 #include "base/threading/platform_thread_internal_posix.h"
 #include "base/containers/adapters.h"
-#include "base/logging.h"
+#include "base/notreached.h"
 namespace base {
diff --git a/chromium/base/threading/platform_thread_mac.mm b/chromium/base/threading/platform_thread_mac.mm
index 296b5992abd..00f2f976858 100644
--- a/chromium/base/threading/platform_thread_mac.mm
+++ b/chromium/base/threading/platform_thread_mac.mm
@@ -163,9 +163,23 @@ void PlatformThread::SetCurrentThreadPriorityImpl(ThreadPriority priority) {
 [[NSThread currentThread] setThreadPriority:0];
 break;
 case ThreadPriority::NORMAL:
- case ThreadPriority::DISPLAY:
 [[NSThread currentThread] setThreadPriority:0.5];
 break;
+ case ThreadPriority::DISPLAY: {
+ // Apple has suggested that insufficient priority may be the reason for
+ // Metal shader compilation hangs. A priority of 50 is higher than user
+ // input.
+ // https://crbug.com/974219.
+ [[NSThread currentThread] setThreadPriority:1.0];
+ sched_param param;
+ int policy;
+ pthread_t thread = pthread_self();
+ if (!pthread_getschedparam(thread, &policy, &param)) {
+ param.sched_priority = 50;
+ pthread_setschedparam(thread, policy, &param);
+ }
+ break;
+ }
 case ThreadPriority::REALTIME_AUDIO:
 SetPriorityRealtimeAudio();
 DCHECK_EQ([[NSThread currentThread] threadPriority], 1.0);
diff --git a/chromium/base/threading/post_task_and_reply_impl.cc b/chromium/base/threading/post_task_and_reply_impl.cc
index cc802746ee4..7dba8374e56 100644
--- a/chromium/base/threading/post_task_and_reply_impl.cc
+++ b/chromium/base/threading/post_task_and_reply_impl.cc
@@ -7,8 +7,8 @@
 #include <utility>
 #include "base/bind.h"
+#include "base/check_op.h"
 #include "base/debug/leak_annotations.h"
-#include "base/logging.h"
 #include "base/memory/ref_counted.h"
 #include "base/sequenced_task_runner.h"
 #include "base/threading/sequenced_task_runner_handle.h"
diff --git a/chromium/base/threading/scoped_blocking_call.cc b/chromium/base/threading/scoped_blocking_call.cc
index 2b501b77b90..62edcd2af48 100644
--- a/chromium/base/threading/scoped_blocking_call.cc
+++ b/chromium/base/threading/scoped_blocking_call.cc
@@ -27,7 +27,10 @@ LazyInstance<ThreadLocalBoolean>::Leaky tls_construction_in_progress =
 ScopedBlockingCall::ScopedBlockingCall(const Location& from_here,
 BlockingType blocking_type)
- : UncheckedScopedBlockingCall(from_here, blocking_type) {
+ : UncheckedScopedBlockingCall(
+ from_here,
+ blocking_type,
+ UncheckedScopedBlockingCall::BlockingCallType::kRegular) {
 #if DCHECK_IS_ON()
 DCHECK(!tls_construction_in_progress.Get().Get());
 tls_construction_in_progress.Get().Set(true);
@@ -51,7 +54,10 @@ namespace internal {
 ScopedBlockingCallWithBaseSyncPrimitives::
 ScopedBlockingCallWithBaseSyncPrimitives(const Location& from_here,
 BlockingType blocking_type)
- : UncheckedScopedBlockingCall(from_here, blocking_type) {
+ : UncheckedScopedBlockingCall(
+ from_here,
+ blocking_type,
+
UncheckedScopedBlockingCall::BlockingCallType::kBaseSyncPrimitives) { #if DCHECK_IS_ON() DCHECK(!tls_construction_in_progress.Get().Get()); tls_construction_in_progress.Get().Set(true); diff --git a/chromium/base/threading/scoped_blocking_call.h b/chromium/base/threading/scoped_blocking_call.h index 1681fe9cdcc..f141eb73f65 100644 --- a/chromium/base/threading/scoped_blocking_call.h +++ b/chromium/base/threading/scoped_blocking_call.h @@ -6,7 +6,9 @@ #define BASE_THREADING_SCOPED_BLOCKING_CALL_H #include "base/base_export.h" +#include "base/callback_forward.h" #include "base/location.h" +#include "base/strings/string_piece.h" #include "base/threading/scoped_blocking_call_internal.h" namespace base { @@ -111,6 +113,18 @@ class BASE_EXPORT ScopedBlockingCallWithBaseSyncPrimitives } // namespace internal +using IOJankReportingCallback = + RepeatingCallback<void(int janky_intervals_per_minute, + int total_janks_per_minute)>; +// Enables IO jank monitoring and reporting for this process. Should be called +// at most once per process and only if +// base::TimeTicks::IsConsistentAcrossProcesses() (the algorithm is unsafe +// otherwise). |reporting_callback| will be invoked each time a monitoring +// window completes, see internal::~IOJankMonitoringWindow() for details (must +// be thread-safe). +void BASE_EXPORT +EnableIOJankMonitoringForProcess(IOJankReportingCallback reporting_callback); + } // namespace base #endif // BASE_THREADING_SCOPED_BLOCKING_CALL_H diff --git a/chromium/base/threading/scoped_blocking_call_internal.cc b/chromium/base/threading/scoped_blocking_call_internal.cc index 05e1eb8553b..7ee3d709c3d 100644 --- a/chromium/base/threading/scoped_blocking_call_internal.cc +++ b/chromium/base/threading/scoped_blocking_call_internal.cc @@ -4,9 +4,19 @@ #include "base/threading/scoped_blocking_call_internal.h" +#include <algorithm> +#include <utility> + +#include "base/bind.h" +#include "base/bind_helpers.h" +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" +#include "base/no_destructor.h" #include "base/scoped_clear_last_error.h" +#include "base/task/scoped_set_task_priority_for_current_thread.h" +#include "base/task/thread_pool.h" +#include "base/task/thread_pool/environment_config.h" +#include "base/task/thread_pool/thread_pool_instance.h" #include "base/threading/scoped_blocking_call.h" #include "base/threading/thread_local.h" #include "build/build_config.h" @@ -20,13 +30,18 @@ namespace { // echo -n "ScopedBlockingCall" | sha1sum constexpr uint32_t kActivityTrackerId = 0x11be9915; -LazyInstance<ThreadLocalPointer<internal::BlockingObserver>>::Leaky +LazyInstance<ThreadLocalPointer<BlockingObserver>>::Leaky tls_blocking_observer = LAZY_INSTANCE_INITIALIZER; // Last ScopedBlockingCall instantiated on this thread. 
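The EnableIOJankMonitoringForProcess() entry point declared in scoped_blocking_call.h above is the only new public surface here. A minimal usage sketch (illustrative, not part of the patch; ReportIOJankStats() is a hypothetical embedder-side reporting helper, and the IsConsistentAcrossProcesses() guard mirrors the requirement stated in the declaration's comment):

#include "base/bind.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/time/time.h"

// Hypothetical helper that forwards the per-minute counts to a metrics
// backend.
void ReportIOJankStats(int janky_intervals_per_minute,
                       int total_janks_per_minute);

void MaybeEnableIOJankMonitoring() {
  // The monitoring algorithm is only safe when TimeTicks agree across
  // processes; call this at most once per process.
  if (base::TimeTicks::IsConsistentAcrossProcesses()) {
    base::EnableIOJankMonitoringForProcess(
        base::BindRepeating(&ReportIOJankStats));
  }
}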
-LazyInstance<ThreadLocalPointer<internal::UncheckedScopedBlockingCall>>::Leaky +LazyInstance<ThreadLocalPointer<UncheckedScopedBlockingCall>>::Leaky tls_last_scoped_blocking_call = LAZY_INSTANCE_INITIALIZER; +bool IsBackgroundPriorityWorker() { + return GetTaskPriorityForCurrentThread() == TaskPriority::BEST_EFFORT && + CanUseBackgroundPriorityForWorkerThread(); +} + } // namespace void SetBlockingObserverForCurrentThread(BlockingObserver* blocking_observer) { @@ -38,9 +53,210 @@ void ClearBlockingObserverForCurrentThread() { tls_blocking_observer.Get().Set(nullptr); } +IOJankMonitoringWindow::ScopedMonitoredCall::ScopedMonitoredCall() + : call_start_(TimeTicks::Now()), + assigned_jank_window_(MonitorNextJankWindowIfNecessary(call_start_)) {} + +IOJankMonitoringWindow::ScopedMonitoredCall::~ScopedMonitoredCall() { + if (assigned_jank_window_) { + assigned_jank_window_->OnBlockingCallCompleted(call_start_, + TimeTicks::Now()); + } +} + +void IOJankMonitoringWindow::ScopedMonitoredCall::Cancel() { + assigned_jank_window_ = nullptr; +} + +IOJankMonitoringWindow::IOJankMonitoringWindow(TimeTicks start_time) + : start_time_(start_time) {} + +// static +void IOJankMonitoringWindow::CancelMonitoringForTesting() { + AutoLock lock(current_jank_window_lock()); + current_jank_window_storage() = nullptr; + reporting_callback_storage() = NullCallback(); +} + +// static +constexpr TimeDelta IOJankMonitoringWindow::kIOJankInterval; +// static +constexpr TimeDelta IOJankMonitoringWindow::kMonitoringWindow; +// static +constexpr TimeDelta IOJankMonitoringWindow::kTimeDiscrepancyTimeout; +// static +constexpr int IOJankMonitoringWindow::kNumIntervals; + +// static +scoped_refptr<IOJankMonitoringWindow> +IOJankMonitoringWindow::MonitorNextJankWindowIfNecessary(TimeTicks recent_now) { + DCHECK_GE(TimeTicks::Now(), recent_now); + + scoped_refptr<IOJankMonitoringWindow> next_jank_window; + + { + AutoLock lock(current_jank_window_lock()); + + if (!reporting_callback_storage()) + return nullptr; + + scoped_refptr<IOJankMonitoringWindow>& current_jank_window_ref = + current_jank_window_storage(); + + // Start the next window immediately after the current one (rather than + // based on Now() to avoid uncovered gaps). Only use Now() for the very + // first window in a monitoring chain. + TimeTicks next_window_start_time = + current_jank_window_ref + ? current_jank_window_ref->start_time_ + kMonitoringWindow + : recent_now; + + if (next_window_start_time > recent_now) { + // Another thread beat us to constructing the next monitoring window and + // |current_jank_window_ref| already covers |recent_now|. + return current_jank_window_ref; + } + + if (recent_now - next_window_start_time >= kTimeDiscrepancyTimeout) { + // If the delayed task runs on a regular heartbeat, |recent_now| should be + // roughly equal to |next_window_start_time|. If we miss by more than + // kTimeDiscrepancyTimeout, we likely hit machine sleep, cancel sampling + // that window in that case. + // + // Note: It is safe to touch |canceled_| without a lock here as this is + // the only time it's set and it naturally happens-before + // |current_jank_window_ref|'s destructor reads it. 
+ current_jank_window_ref->canceled_ = true; + next_window_start_time = recent_now; + } + + next_jank_window = + MakeRefCounted<IOJankMonitoringWindow>(next_window_start_time); + + if (current_jank_window_ref && !current_jank_window_ref->canceled_) { + // If there are still IO operations in progress within + // |current_jank_window_ref|, they have a ref to it and will be the ones + // triggering ~IOJankMonitoringWindow(). When doing so, they will overlap + // into the |next_jank_window| we are setting up (|next_| will also own a + // ref so a very long jank can safely unwind across a chain of pending + // |next_|'s). + DCHECK(!current_jank_window_ref->next_); + current_jank_window_ref->next_ = next_jank_window; + } + + // Make |next_jank_window| the new current before releasing the lock. + current_jank_window_ref = next_jank_window; + } + + // Post a task to kick off the next monitoring window if no monitored thread + // beats us to it. Adjust the timing to alleviate any drift in the timer. Do + // this outside the lock to avoid scheduling tasks while holding it. + ThreadPool::PostDelayedTask( + FROM_HERE, BindOnce([]() { + IOJankMonitoringWindow::MonitorNextJankWindowIfNecessary( + TimeTicks::Now()); + }), + kMonitoringWindow - (recent_now - next_jank_window->start_time_)); + + return next_jank_window; +} + +// NO_THREAD_SAFETY_ANALYSIS because ~RefCountedThreadSafe() guarantees we're +// the last ones to access this state (and ordered after all other accesses). +IOJankMonitoringWindow::~IOJankMonitoringWindow() NO_THREAD_SAFETY_ANALYSIS { + if (canceled_) + return; + + int janky_intervals_count = 0; + int total_jank_count = 0; + + for (size_t interval_jank_count : intervals_jank_count_) { + if (interval_jank_count > 0) { + ++janky_intervals_count; + total_jank_count += interval_jank_count; + } + } + + // reporting_callback_storage() is safe to access without lock because an + // IOJankMonitoringWindow existing means we're after the call to + // EnableIOJankMonitoringForProcess() and it will not change after that call. + DCHECK(reporting_callback_storage()); + reporting_callback_storage().Run(janky_intervals_count, total_jank_count); +} + +void IOJankMonitoringWindow::OnBlockingCallCompleted(TimeTicks call_start, + TimeTicks call_end) { + if (call_end - call_start < kIOJankInterval) + return; + + // Make sure the chain of |next_| pointers is sufficient to reach + // |call_end| (e.g. if this runs before the delayed task kicks in) + if (call_end >= start_time_ + kMonitoringWindow) + MonitorNextJankWindowIfNecessary(call_end); + + const int jank_start_index = (call_start - start_time_) / kIOJankInterval; + const int num_janky_intervals = (call_end - call_start) / kIOJankInterval; + + AddJank(jank_start_index, num_janky_intervals); +} + +void IOJankMonitoringWindow::AddJank(int local_jank_start_index, + int num_janky_intervals) { + // Increment jank counts for intervals in this window. If + // |num_janky_intervals| lands beyond kNumIntervals, the additional intervals + // will be reported to |next_|. + const int jank_end_index = local_jank_start_index + num_janky_intervals; + const int local_jank_end_index = std::min(kNumIntervals, jank_end_index); + + { + // Note: while this window could be |canceled| here we must add our count + // unconditionally as it is only thread-safe to read |canceled| in + // ~IOJankMonitoringWindow(). 
+ AutoLock lock(intervals_lock_); + for (int i = local_jank_start_index; i < local_jank_end_index; ++i) + ++intervals_jank_count_[i]; + } + + if (jank_end_index != local_jank_end_index) { + // OnBlockingCallCompleted() should have already ensured there's a |next_| + // chain covering |num_janky_intervals| unless it caused this to be + // |canceled_|. Exceptionally for this check, reading these fields when + // they're expected to be true is thread-safe as their only modification + // happened-before this point. + DCHECK(next_ || canceled_); + if (next_) { + // If |next_| is non-null, it means |this| wasn't canceled and it implies + // |next_| covers the time range starting immediately after this window. + DCHECK_EQ(next_->start_time_, start_time_ + kMonitoringWindow); + next_->AddJank(0, jank_end_index - local_jank_end_index); + } + } +} + +// static +Lock& IOJankMonitoringWindow::current_jank_window_lock() { + static NoDestructor<Lock> current_jank_window_lock; + return *current_jank_window_lock; +} + +// static +scoped_refptr<IOJankMonitoringWindow>& +IOJankMonitoringWindow::current_jank_window_storage() { + static NoDestructor<scoped_refptr<IOJankMonitoringWindow>> + current_jank_window; + return *current_jank_window; +} + +// static +IOJankReportingCallback& IOJankMonitoringWindow::reporting_callback_storage() { + static NoDestructor<IOJankReportingCallback> reporting_callback; + return *reporting_callback; +} + UncheckedScopedBlockingCall::UncheckedScopedBlockingCall( const Location& from_here, - BlockingType blocking_type) + BlockingType blocking_type, + BlockingCallType blocking_call_type) : blocking_observer_(tls_blocking_observer.Get().Get()), previous_scoped_blocking_call_(tls_last_scoped_blocking_call.Get().Get()), is_will_block_(blocking_type == BlockingType::WILL_BLOCK || @@ -49,6 +265,21 @@ UncheckedScopedBlockingCall::UncheckedScopedBlockingCall( scoped_activity_(from_here, 0, kActivityTrackerId, 0) { tls_last_scoped_blocking_call.Get().Set(this); + // Only monitor non-nested ScopedBlockingCall(MAY_BLOCK) calls on foreground + // threads. Cancels() any pending monitored call when a WILL_BLOCK or + // ScopedBlockingCallWithBaseSyncPrimitives nests into a + // ScopedBlockingCall(MAY_BLOCK). + if (!IsBackgroundPriorityWorker()) { + const bool is_monitored_type = + blocking_call_type == BlockingCallType::kRegular && !is_will_block_; + if (is_monitored_type && !previous_scoped_blocking_call_) { + monitored_call_.emplace(); + } else if (!is_monitored_type && previous_scoped_blocking_call_ && + previous_scoped_blocking_call_->monitored_call_) { + previous_scoped_blocking_call_->monitored_call_->Cancel(); + } + } + if (blocking_observer_) { if (!previous_scoped_blocking_call_) { blocking_observer_->BlockingStarted(blocking_type); @@ -60,7 +291,7 @@ UncheckedScopedBlockingCall::UncheckedScopedBlockingCall( if (scoped_activity_.IsRecorded()) { // Also record the data for extended crash reporting. - const base::TimeTicks now = base::TimeTicks::Now(); + const TimeTicks now = TimeTicks::Now(); auto& user_data = scoped_activity_.user_data(); user_data.SetUint("timestamp_us", now.since_origin().InMicroseconds()); user_data.SetUint("blocking_type", static_cast<uint64_t>(blocking_type)); @@ -70,7 +301,7 @@ UncheckedScopedBlockingCall::UncheckedScopedBlockingCall( UncheckedScopedBlockingCall::~UncheckedScopedBlockingCall() { // TLS affects result of GetLastError() on Windows. ScopedClearLastError // prevents side effect. 
- base::internal::ScopedClearLastError save_last_error; + ScopedClearLastError save_last_error; DCHECK_EQ(this, tls_last_scoped_blocking_call.Get().Get()); tls_last_scoped_blocking_call.Get().Set(previous_scoped_blocking_call_); if (blocking_observer_ && !previous_scoped_blocking_call_) @@ -78,4 +309,22 @@ UncheckedScopedBlockingCall::~UncheckedScopedBlockingCall() { } } // namespace internal + +void EnableIOJankMonitoringForProcess( + IOJankReportingCallback reporting_callback) { + { + AutoLock lock(internal::IOJankMonitoringWindow::current_jank_window_lock()); + + DCHECK(internal::IOJankMonitoringWindow::reporting_callback_storage() + .is_null()); + internal::IOJankMonitoringWindow::reporting_callback_storage() = + std::move(reporting_callback); + } + + // Make sure monitoring starts now rather than randomly at the next + // ScopedMonitoredCall construction. + internal::IOJankMonitoringWindow::MonitorNextJankWindowIfNecessary( + TimeTicks::Now()); +} + } // namespace base diff --git a/chromium/base/threading/scoped_blocking_call_internal.h b/chromium/base/threading/scoped_blocking_call_internal.h index 88c0bc3383e..73ba4810e6d 100644 --- a/chromium/base/threading/scoped_blocking_call_internal.h +++ b/chromium/base/threading/scoped_blocking_call_internal.h @@ -8,10 +8,18 @@ #include "base/base_export.h" #include "base/debug/activity_tracker.h" #include "base/macros.h" +#include "base/memory/ref_counted.h" +#include "base/optional.h" +#include "base/synchronization/lock.h" +#include "base/thread_annotations.h" +#include "base/time/time.h" namespace base { +// Forward-declare types from scoped_blocking_call.h to break cyclic dependency. enum class BlockingType; +using IOJankReportingCallback = RepeatingCallback<void(int, int)>; +void BASE_EXPORT EnableIOJankMonitoringForProcess(IOJankReportingCallback); // Implementation details of types in scoped_blocking_call.h and classes for a // few key //base types to observe and react to blocking calls. @@ -44,16 +52,124 @@ BASE_EXPORT void SetBlockingObserverForCurrentThread( BASE_EXPORT void ClearBlockingObserverForCurrentThread(); +// An IOJankMonitoringWindow instruments 1-minute of runtime. Any I/O jank > 1 +// second happening during that period will be reported to it. It will then +// report via the IOJankReportingCallback in |reporting_callback_storage()| if +// it's non-null. https://bit.ly/chrome-io-jank-metric. +class BASE_EXPORT IOJankMonitoringWindow + : public RefCountedThreadSafe<IOJankMonitoringWindow> { + public: + explicit IOJankMonitoringWindow(TimeTicks start_time); + + IOJankMonitoringWindow(const IOJankMonitoringWindow&) = delete; + IOJankMonitoringWindow& operator=(const IOJankMonitoringWindow&) = delete; + + // Cancels monitoring and clears this class' static state. + static void CancelMonitoringForTesting(); + + class ScopedMonitoredCall { + public: + // Stores a ref to the current IOJankMonitoringWindow if monitoring is + // active, keeping it alive at least until the monitored call completes or + // Cancel() is invoked. + ScopedMonitoredCall(); + + // Reports to |assigned_jank_window_| if it's non-null. + ~ScopedMonitoredCall(); + + ScopedMonitoredCall(const ScopedMonitoredCall&) = delete; + ScopedMonitoredCall& operator=(const ScopedMonitoredCall&) = delete; + + // Cancels monitoring of this call. 
+ void Cancel(); + + private: + const TimeTicks call_start_; + scoped_refptr<IOJankMonitoringWindow> assigned_jank_window_; + }; + + static constexpr TimeDelta kIOJankInterval = TimeDelta::FromSeconds(1); + static constexpr TimeDelta kMonitoringWindow = TimeDelta::FromMinutes(1); + static constexpr TimeDelta kTimeDiscrepancyTimeout = kIOJankInterval * 10; + static constexpr int kNumIntervals = kMonitoringWindow / kIOJankInterval; + + private: + friend class base::RefCountedThreadSafe<IOJankMonitoringWindow>; + friend void base::EnableIOJankMonitoringForProcess(IOJankReportingCallback); + + // No-op if reporting_callback_storage() is null (i.e. unless + // EnableIOJankMonitoringForProcess() was called). + // When reporting_callback_storage() is non-null : Ensures that there's an + // active IOJankMonitoringWindow for Now(), connects it via |next_| to the + // previous IOJankMonitoringWindow to let ScopedMonitoredCalls that span + // multiple windows report to each window they cover. In the event that Now() + // is farther ahead than expected (> 10s), the previous window is |canceled_| + // as it was likely interrupted by a system sleep and a new + // IOJankMonitoringWindow chain is started from Now(). In all cases, returns a + // live reference to the current (old or new) IOJankMonitoringWindow as a + // helper so callers that need it don't need to re-acquire + // current_jank_window_lock() after calling this. + // |recent_now| is a recent sampling of TimeTicks::Now(), avoids + // double-sampling Now() from most callers. + static scoped_refptr<IOJankMonitoringWindow> MonitorNextJankWindowIfNecessary( + TimeTicks recent_now); + + // An IOJankMonitoringWindow is destroyed when all refs to it are gone, i.e.: + // 1) The window it covers has elapsed and MonitorNextJankWindowIfNecessary() + // has replaced it. + // 2) All pending ScopedMonitoredCall's in their range have completed + // (including the ones that transitively have it in their |next_| chain). + ~IOJankMonitoringWindow(); + + // Called from ~ScopedMonitoredCall(). + void OnBlockingCallCompleted(TimeTicks call_start, TimeTicks call_end); + + // Helper for OnBlockingCallCompleted(). Records |num_janky_intervals| + // starting at |local_jank_start_index|. Having this logic separately helps + // sane management of |intervals_lock_| when recursive calls through |next_| + // pointers are necessary. + void AddJank(int local_jank_start_index, int num_janky_intervals); + + static Lock& current_jank_window_lock(); + static scoped_refptr<IOJankMonitoringWindow>& current_jank_window_storage() + EXCLUSIVE_LOCKS_REQUIRED(current_jank_window_lock()); + + // Storage for callback used to report monitoring results. + // NullCallback if monitoring was not enabled for this process. + static IOJankReportingCallback& reporting_callback_storage() + EXCLUSIVE_LOCKS_REQUIRED(current_jank_window_lock()); + + Lock intervals_lock_; + size_t intervals_jank_count_[kNumIntervals] GUARDED_BY(intervals_lock_) = {}; + + const TimeTicks start_time_; + + // Set only once per window, in MonitorNextJankWindowIfNecessary(). Any read + // of this value must be ordered after that call in memory and in time. + scoped_refptr<IOJankMonitoringWindow> next_; + + // Set to true if ~IOJankMonitoringWindow() shouldn't record metrics. + // Modifications of this variable must be synchronized with each other and + // happen-before ~IOJankMonitoringWindow(). 
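For orientation, the constants declared above imply kNumIntervals == kMonitoringWindow / kIOJankInterval == 60: each window tracks sixty 1-second intervals, so a single MAY_BLOCK call that blocks for 7 seconds inside one window is reported as 7 janky intervals and 7 total janks, which is the (7, 7) expectation used by the unit tests later in this patch. A tiny compile-time restatement of that relationship (a sketch, not part of the patch):

#include "base/threading/scoped_blocking_call_internal.h"

// One 1-minute window is split into sixty 1-second jank intervals.
static_assert(base::internal::IOJankMonitoringWindow::kNumIntervals == 60,
              "kMonitoringWindow / kIOJankInterval should be 60");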
+ bool canceled_ = false; +}; + // Common implementation class for both ScopedBlockingCall and // ScopedBlockingCallWithBaseSyncPrimitives without assertions. class BASE_EXPORT UncheckedScopedBlockingCall { public: + enum class BlockingCallType { + kRegular, + kBaseSyncPrimitives, + }; + explicit UncheckedScopedBlockingCall(const Location& from_here, - BlockingType blocking_type); + BlockingType blocking_type, + BlockingCallType blocking_call_type); ~UncheckedScopedBlockingCall(); private: - internal::BlockingObserver* const blocking_observer_; + BlockingObserver* const blocking_observer_; // Previous ScopedBlockingCall instantiated on this thread. UncheckedScopedBlockingCall* const previous_scoped_blocking_call_; @@ -64,6 +180,10 @@ class BASE_EXPORT UncheckedScopedBlockingCall { base::debug::ScopedActivity scoped_activity_; + // Non-nullopt for non-nested blocking calls of type MAY_BLOCK on foreground + // threads which we monitor for I/O jank. + Optional<IOJankMonitoringWindow::ScopedMonitoredCall> monitored_call_; + DISALLOW_COPY_AND_ASSIGN(UncheckedScopedBlockingCall); }; diff --git a/chromium/base/threading/scoped_blocking_call_unittest.cc b/chromium/base/threading/scoped_blocking_call_unittest.cc index 2c71e5acf72..9160a8c351c 100644 --- a/chromium/base/threading/scoped_blocking_call_unittest.cc +++ b/chromium/base/threading/scoped_blocking_call_unittest.cc @@ -5,13 +5,27 @@ #include "base/threading/scoped_blocking_call.h" #include <memory> +#include <utility> +#include <vector> +#include "base/barrier_closure.h" +#include "base/bind.h" +#include "base/callback.h" #include "base/macros.h" +#include "base/task/thread_pool/environment_config.h" +#include "base/task/thread_pool/thread_pool_impl.h" +#include "base/test/bind_test_util.h" #include "base/test/gtest_util.h" +#include "base/test/task_environment.h" +#include "base/test/test_waitable_event.h" #include "base/threading/scoped_blocking_call_internal.h" +#include "base/threading/thread_restrictions.h" +#include "build/build_config.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" +using testing::ElementsAre; + namespace base { namespace { @@ -147,4 +161,647 @@ TEST(ScopedBlockingCallDestructionOrderTest, InvalidDestructionOrder) { EXPECT_DCHECK_DEATH({ scoped_blocking_call_a.reset(); }); } +class ScopedBlockingCallIOJankMonitoringTest : public testing::Test { + public: + ScopedBlockingCallIOJankMonitoringTest() = default; + + void SetUp() override { + // Note 1: While EnableIOJankMonitoringForProcess() is documented as being + // only callable once per process. The call to CancelMonitoringForTesting() + // in TearDown() makes it okay to call this in multiple tests in a row + // within a single process. + // Note 2: No need to check TimeTicks::IsConsistentAcrossProcesses() in + // spite of EnableIOJankMonitoringForProcess()'s requirement as + // TimeSource::MOCK_TIME avoids usage of the system clock and avoids the + // issue. + EnableIOJankMonitoringForProcess(BindLambdaForTesting( + [&](int janky_intervals_per_minute, int total_janks_per_minute) { + reports_.push_back( + {janky_intervals_per_minute, total_janks_per_minute}); + })); + } + + void TearDown() override { + internal::IOJankMonitoringWindow::CancelMonitoringForTesting(); + } + + protected: + // A member initialized before |task_environment_| that forces worker threads + // to be started synchronously. 
This avoids a tricky race where Linux invokes + // SetCurrentThreadPriority() from early main, before invoking ThreadMain and + // yielding control to the thread pool impl. That causes a ScopedBlockingCall + // in platform_thread_linux.cc:SetThreadCgroupForThreadPriority and interferes + // with this test. This solution is quite intrusive but is the simplest we can + // do for this unique corner case. + struct SetSynchronousThreadStart { + SetSynchronousThreadStart() { + internal::ThreadPoolImpl::SetSynchronousThreadStartForTesting(true); + } + ~SetSynchronousThreadStart() { + internal::ThreadPoolImpl::SetSynchronousThreadStartForTesting(false); + } + } set_synchronous_thread_start_; + + // The registered lambda above may report to this from any thread. It is + // nonetheless safe to read this from the test body as + // TaskEnvironment+MOCK_TIME advances the test in lock steps. + std::vector<std::pair<int, int>> reports_; + + test::TaskEnvironment task_environment_{ + test::TaskEnvironment::TimeSource::MOCK_TIME}; +}; + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, Basic) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + // Advance precisely to the end of this window. + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow - kJankTiming); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 7))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, NestedDoesntMatter) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + ScopedBlockingCall nested(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + // Jump to the next window. + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 7))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, ManyInAWindow) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + constexpr auto kIdleTiming = TimeDelta::FromSeconds(3); + + for (int i = 0; i < 3; ++i) { + { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + task_environment_.FastForwardBy(kIdleTiming); + } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + // Complete the current window. + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow - + (kJankTiming + kIdleTiming) * 3); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7 * 3, 7 * 3))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, OverlappingMultipleWindows) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kMonitoringWindow * 3 + + internal::IOJankMonitoringWindow::kIOJankInterval * 5; + + { + ScopedBlockingCall blocked_for_3windows(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + + // Fast-forward by another window with no active blocking calls. 
+ task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + // 3 windows janky for their full breadth and 1 window janky for 5 seconds. + EXPECT_THAT(reports_, + ElementsAre(std::make_pair(60, 60), std::make_pair(60, 60), + std::make_pair(60, 60), std::make_pair(5, 5))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, InstantUnblockReportsZero) { + { ScopedBlockingCall instant_unblock(FROM_HERE, BlockingType::MAY_BLOCK); } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); + + // No blocking call in next window also reports zero. + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + EXPECT_THAT(reports_, + ElementsAre(std::make_pair(0, 0), std::make_pair(0, 0))); +} + +// Start the jank mid-interval; that interval shouldn't be counted but the last +// incomplete interval will count. +TEST_F(ScopedBlockingCallIOJankMonitoringTest, Jank7sMidInterval) { + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kIOJankInterval / 3); + + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 7))); +} + +// Start the jank mid-interval; that interval shouldn't be counted but the +// second one should be despite being incomplete. +TEST_F(ScopedBlockingCallIOJankMonitoringTest, Jank1sMidInterval) { + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kIOJankInterval / 3); + + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval; + { + ScopedBlockingCall blocked_for_1s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(1, 1))); +} + +// Start mid-interval and perform an operation that overlaps into the next one +// but is under the jank timing. +TEST_F(ScopedBlockingCallIOJankMonitoringTest, NoJankMidInterval) { + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kIOJankInterval / 3); + + { + ScopedBlockingCall non_janky(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kIOJankInterval - + TimeDelta::FromMilliseconds(1)); + } + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, MultiThreaded) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + + // Every worker needs to block for precise clock management; hence we can't + // test beyond the TaskEnvironment's capacity. 
+ const int kNumJankyTasks =
+ test::TaskEnvironment::kNumForegroundThreadPoolThreads;
+
+ TestWaitableEvent all_threads_blocked;
+ auto on_thread_blocked = BarrierClosure(
+ kNumJankyTasks,
+ BindOnce(&TestWaitableEvent::Signal, Unretained(&all_threads_blocked)));
+
+ TestWaitableEvent resume_all_threads;
+
+ for (int i = 0; i < kNumJankyTasks; ++i) {
+ base::ThreadPool::PostTask(
+ FROM_HERE, {MayBlock()}, BindLambdaForTesting([&]() {
+ ScopedBlockingCall blocked_until_signal(FROM_HERE,
+ BlockingType::MAY_BLOCK);
+ on_thread_blocked.Run();
+
+ ScopedAllowBaseSyncPrimitivesForTesting allow_wait;
+ resume_all_threads.Wait();
+ }));
+ }
+
+ all_threads_blocked.Wait();
+ task_environment_.AdvanceClock(kJankTiming);
+ resume_all_threads.Signal();
+ task_environment_.RunUntilIdle();
+
+ // No janks reported before the monitoring window completes.
+ EXPECT_THAT(reports_, ElementsAre());
+
+ task_environment_.FastForwardBy(
+ internal::IOJankMonitoringWindow::kMonitoringWindow);
+
+ // Still only 7 janky intervals, but more overall janks.
+ EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 7 * kNumJankyTasks)));
+}
+
+// 3 janks of 3 seconds; overlapping but starting 1 second apart from each
+// other.
+TEST_F(ScopedBlockingCallIOJankMonitoringTest, MultiThreadedOverlapped) {
+ static const int kNumJankyTasks = 3;
+ static_assert(
+ kNumJankyTasks <= test::TaskEnvironment::kNumForegroundThreadPoolThreads,
+ "");
+
+ TestWaitableEvent next_task_is_blocked(WaitableEvent::ResetPolicy::AUTOMATIC);
+
+ TestWaitableEvent resume_thread[kNumJankyTasks] = {};
+ TestWaitableEvent exited_blocking_scope[kNumJankyTasks] = {};
+
+ auto blocking_task = BindLambdaForTesting([&](int task_index) {
+ {
+ // Simulate jank until |resume_thread[task_index]| is signaled.
+ ScopedBlockingCall blocked_until_signal(FROM_HERE,
+ BlockingType::MAY_BLOCK);
+ next_task_is_blocked.Signal();
+
+ ScopedAllowBaseSyncPrimitivesForTesting allow_wait;
+ resume_thread[task_index].Wait();
+ }
+ exited_blocking_scope[task_index].Signal();
+ });
+
+ // [0-1]s
+ base::ThreadPool::PostTask(FROM_HERE, {MayBlock()},
+ BindOnce(blocking_task, 0));
+ next_task_is_blocked.Wait();
+ task_environment_.AdvanceClock(
+ internal::IOJankMonitoringWindow::kIOJankInterval);
+
+ // [1-2]s
+ base::ThreadPool::PostTask(FROM_HERE, {MayBlock()},
+ BindOnce(blocking_task, 1));
+ next_task_is_blocked.Wait();
+ task_environment_.AdvanceClock(
+ internal::IOJankMonitoringWindow::kIOJankInterval);
+
+ // [2-3]s
+ base::ThreadPool::PostTask(FROM_HERE, {MayBlock()},
+ BindOnce(blocking_task, 2));
+ next_task_is_blocked.Wait();
+ task_environment_.AdvanceClock(
+ internal::IOJankMonitoringWindow::kIOJankInterval);
+
+ // [3-6]s
+ for (int i = 0; i < kNumJankyTasks; ++i) {
+ resume_thread[i].Signal();
+ exited_blocking_scope[i].Wait();
+ task_environment_.AdvanceClock(
+ internal::IOJankMonitoringWindow::kIOJankInterval);
+ }
+
+ // No janks reported before the monitoring window completes.
+ EXPECT_THAT(reports_, ElementsAre());
+
+ task_environment_.FastForwardBy(
+ internal::IOJankMonitoringWindow::kMonitoringWindow);
+
+ // 9s of total janks spread across 5 intervals.
+ EXPECT_THAT(reports_, ElementsAre(std::make_pair(5, 9)));
+}
+
+// 3 janks of 180 seconds; overlapping but starting 60s apart from each other.
+// First one starting at 10 seconds (can't start later than that or we'll trip
+// the kTimeDiscrepancyTimeout per TaskEnvironment's inability to RunUntilIdle()
+// with pending blocked tasks).
+#if defined(OS_LINUX) || defined(OS_CHROMEOS) +// https://crbug.com/1071166 +#define MAYBE_MultiThreadedOverlappedWindows \ + DISABLED_MultiThreadedOverlappedWindows +#else +#define MAYBE_MultiThreadedOverlappedWindows MultiThreadedOverlappedWindows +#endif +TEST_F(ScopedBlockingCallIOJankMonitoringTest, + MAYBE_MultiThreadedOverlappedWindows) { + static const int kNumJankyTasks = 3; + static_assert( + kNumJankyTasks <= test::TaskEnvironment::kNumForegroundThreadPoolThreads, + ""); + + TestWaitableEvent next_task_is_blocked(WaitableEvent::ResetPolicy::AUTOMATIC); + + TestWaitableEvent resume_thread[kNumJankyTasks] = {}; + TestWaitableEvent exited_blocking_scope[kNumJankyTasks] = {}; + + auto blocking_task = BindLambdaForTesting([&](int task_index) { + { + // Simulate jank until |resume_thread[task_index]| is signaled. + ScopedBlockingCall blocked_until_signal(FROM_HERE, + BlockingType::MAY_BLOCK); + next_task_is_blocked.Signal(); + + ScopedAllowBaseSyncPrimitivesForTesting allow_wait; + resume_thread[task_index].Wait(); + } + exited_blocking_scope[task_index].Signal(); + }); + + // [0-10s] (minus 1 ms to avoid reaching the timeout; this also tests the + // logic that intervals are rounded down to the starting interval (e.g. + // interval 9/60 in this case)). + task_environment_.AdvanceClock( + internal::IOJankMonitoringWindow::kTimeDiscrepancyTimeout - + TimeDelta::FromMilliseconds(1)); + + // [10-70]s + base::ThreadPool::PostTask(FROM_HERE, {MayBlock()}, + BindOnce(blocking_task, 0)); + next_task_is_blocked.Wait(); + task_environment_.AdvanceClock( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + // [70-130]s + base::ThreadPool::PostTask(FROM_HERE, {MayBlock()}, + BindOnce(blocking_task, 1)); + next_task_is_blocked.Wait(); + task_environment_.AdvanceClock( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + // [130-190]s + base::ThreadPool::PostTask(FROM_HERE, {MayBlock()}, + BindOnce(blocking_task, 2)); + next_task_is_blocked.Wait(); + task_environment_.AdvanceClock( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + // [190-370]s + for (int i = 0; i < kNumJankyTasks; ++i) { + resume_thread[i].Signal(); + exited_blocking_scope[i].Wait(); + task_environment_.AdvanceClock( + internal::IOJankMonitoringWindow::kMonitoringWindow); + } + + // Already past the last window (relevant events end at 360s); flush the + // pending ripe delayed task that will complete the last window. + task_environment_.RunUntilIdle(); + + // 540s(180s*3) of total janks spread across 300 intervals in 6 windows. + // Distributed as such (zoomed out to 6 intervals per window): + // [011111] + // [122222] + // [233333] + // [322222] + // [21111] + // [100000] + // Starting at the 9th interval per the 10s-1ms offset start. + EXPECT_THAT(reports_, + ElementsAre(std::make_pair(51, 51), std::make_pair(60, 111), + std::make_pair(60, 171), std::make_pair(60, 129), + std::make_pair(60, 69), std::make_pair(9, 9))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, CancellationAcrossSleep) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kJankTiming); + } + + // Jump just beyond the kTimeDiscrepancyTimeout for the next window. 
+ task_environment_.AdvanceClock(
+ internal::IOJankMonitoringWindow::kMonitoringWindow +
+ internal::IOJankMonitoringWindow::kTimeDiscrepancyTimeout - kJankTiming);
+ task_environment_.RunUntilIdle();
+
+ // Window was canceled and previous jank was not reported.
+ EXPECT_THAT(reports_, ElementsAre());
+
+ // The second window should be independent and need a full kMonitoringWindow
+ // to elapse before reporting.
+ task_environment_.FastForwardBy(
+ internal::IOJankMonitoringWindow::kMonitoringWindow -
+ TimeDelta::FromSeconds(1));
+ EXPECT_THAT(reports_, ElementsAre());
+
+ task_environment_.FastForwardBy(TimeDelta::FromSeconds(1));
+ EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0)));
+}
+
+TEST_F(ScopedBlockingCallIOJankMonitoringTest, SleepWithLongJank) {
+ {
+ ScopedBlockingCall blocked_through_sleep(FROM_HERE,
+ BlockingType::MAY_BLOCK);
+
+ // Fast-forward 2 full windows and almost to the end of the 3rd.
+ task_environment_.FastForwardBy(
+ internal::IOJankMonitoringWindow::kMonitoringWindow * 3 -
+ TimeDelta::FromSeconds(1));
+
+ // Simulate a "sleep" over the timeout threshold.
+ task_environment_.AdvanceClock(
+ TimeDelta::FromSeconds(1) +
+ internal::IOJankMonitoringWindow::kTimeDiscrepancyTimeout);
+ }
+
+ // Two full jank windows are reported when the ScopedBlockingCall unwinds but
+ // the 3rd is canceled.
+ EXPECT_THAT(reports_,
+ ElementsAre(std::make_pair(60, 60), std::make_pair(60, 60)));
+
+ // The 4th window has a new |start_time| so completing the "remaining delta"
+ // doesn't cause a report from the canceled 3rd window.
+ task_environment_.FastForwardBy(
+ internal::IOJankMonitoringWindow::kMonitoringWindow -
+ TimeDelta::FromSeconds(1));
+ EXPECT_THAT(reports_,
+ ElementsAre(std::make_pair(60, 60), std::make_pair(60, 60)));
+
+ // Completing the whole 4th window generates a report.
+ task_environment_.FastForwardBy(TimeDelta::FromSeconds(1));
+ EXPECT_THAT(reports_,
+ ElementsAre(std::make_pair(60, 60), std::make_pair(60, 60),
+ std::make_pair(0, 0)));
+}
+
+// Verifies that blocking calls on background workers aren't monitored.
+// Platforms where !CanUseBackgroundPriorityForWorkerThread() will still monitor
+// this jank (as it may interfere with other foreground work).
+TEST_F(ScopedBlockingCallIOJankMonitoringTest, BackgroundBlockingCallsIgnored) {
+ constexpr auto kJankTiming =
+ internal::IOJankMonitoringWindow::kIOJankInterval * 7;
+
+ TestWaitableEvent task_running;
+ TestWaitableEvent resume_task;
+
+ base::ThreadPool::PostTask(
+ FROM_HERE, {TaskPriority::BEST_EFFORT, MayBlock()},
+ BindLambdaForTesting([&]() {
+ ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK);
+ task_running.Signal();
+
+ ScopedAllowBaseSyncPrimitivesForTesting allow_wait;
+ resume_task.Wait();
+ }));
+
+ task_running.Wait();
+ task_environment_.AdvanceClock(kJankTiming);
+ resume_task.Signal();
+
+ // No janks reported before the monitoring window completes.
+ EXPECT_THAT(reports_, ElementsAre()); + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + if (internal::CanUseBackgroundPriorityForWorkerThread()) + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); + else + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 7))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, + BackgroundAndForegroundCallsMixed) { + constexpr auto kJankTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + + TestWaitableEvent tasks_running; + auto on_task_running = BarrierClosure( + 2, BindOnce(&TestWaitableEvent::Signal, Unretained(&tasks_running))); + TestWaitableEvent resume_tasks; + + base::ThreadPool::PostTask( + FROM_HERE, {TaskPriority::BEST_EFFORT, MayBlock()}, + BindLambdaForTesting([&]() { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + on_task_running.Run(); + + ScopedAllowBaseSyncPrimitivesForTesting allow_wait; + resume_tasks.Wait(); + })); + + base::ThreadPool::PostTask( + FROM_HERE, {TaskPriority::USER_BLOCKING, MayBlock()}, + BindLambdaForTesting([&]() { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + on_task_running.Run(); + + ScopedAllowBaseSyncPrimitivesForTesting allow_wait; + resume_tasks.Wait(); + })); + + tasks_running.Wait(); + task_environment_.AdvanceClock(kJankTiming); + resume_tasks.Signal(); + + // No janks reported before the monitoring window completes. + EXPECT_THAT(reports_, ElementsAre()); + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + if (internal::CanUseBackgroundPriorityForWorkerThread()) + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 7))); + else + EXPECT_THAT(reports_, ElementsAre(std::make_pair(7, 14))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, WillBlockNotMonitored) { + constexpr auto kBlockedTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_7s(FROM_HERE, BlockingType::WILL_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + } + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, + NestedWillBlockCancelsMonitoring) { + constexpr auto kBlockedTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_14s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + ScopedBlockingCall will_block_for_7s(FROM_HERE, BlockingType::WILL_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + } + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, NestedMayBlockIgnored) { + constexpr auto kBlockedTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_14s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + ScopedBlockingCall may_block_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + } + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(14, 14))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, BaseSyncPrimitivesNotMonitored) { + constexpr auto kBlockedTiming = + 
internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + // Even with MAY_BLOCK; base-sync-primitives aren't considered I/O jank + // (base-sync-primitives induced janks/hangs are captured by other tools, + // like Slow Reports and HangWatcher). + internal::ScopedBlockingCallWithBaseSyncPrimitives + base_sync_primitives_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + } + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); +} + +TEST_F(ScopedBlockingCallIOJankMonitoringTest, + NestedBaseSyncPrimitivesCancels) { + constexpr auto kBlockedTiming = + internal::IOJankMonitoringWindow::kIOJankInterval * 7; + { + ScopedBlockingCall blocked_for_14s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + internal::ScopedBlockingCallWithBaseSyncPrimitives + base_sync_primitives_for_7s(FROM_HERE, BlockingType::MAY_BLOCK); + task_environment_.FastForwardBy(kBlockedTiming); + } + + task_environment_.FastForwardBy( + internal::IOJankMonitoringWindow::kMonitoringWindow); + + EXPECT_THAT(reports_, ElementsAre(std::make_pair(0, 0))); +} + } // namespace base diff --git a/chromium/base/threading/scoped_thread_priority.cc b/chromium/base/threading/scoped_thread_priority.cc index 0fdee1eb150..396071906f2 100644 --- a/chromium/base/threading/scoped_thread_priority.cc +++ b/chromium/base/threading/scoped_thread_priority.cc @@ -18,7 +18,7 @@ ScopedMayLoadLibraryAtBackgroundPriority:: from_here.function_name()); } -bool ScopedMayLoadLibraryAtBackgroundPriority::OnScopeFirstEntered() { +bool ScopedMayLoadLibraryAtBackgroundPriority::OnScopeEntered() { #if defined(OS_WIN) const base::ThreadPriority priority = PlatformThread::GetCurrentThreadPriority(); diff --git a/chromium/base/threading/scoped_thread_priority.h b/chromium/base/threading/scoped_thread_priority.h index 616470f5f84..c1bd9a4f08e 100644 --- a/chromium/base/threading/scoped_thread_priority.h +++ b/chromium/base/threading/scoped_thread_priority.h @@ -41,20 +41,34 @@ enum class ThreadPriority : int; // The macro raises the thread priority to NORMAL for the scope when first // encountered. On Windows, loading a DLL on a background thread can lead to a // priority inversion on the loader lock and cause huge janks. -#define SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY() \ - base::internal::ScopedMayLoadLibraryAtBackgroundPriority \ - INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE( \ - scoped_may_load_library_at_background_priority)(FROM_HERE); \ - { \ - /* Thread-safe static local variable initialization ensures that */ \ - /* OnScopeFirstEntered() is only invoked the first time that this is */ \ - /* encountered. */ \ - static bool INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE(invoke_once) = \ - INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE( \ - scoped_may_load_library_at_background_priority) \ - .OnScopeFirstEntered(); \ - ALLOW_UNUSED_LOCAL( \ - INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE(invoke_once)); \ +#define SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY() \ + base::internal::ScopedMayLoadLibraryAtBackgroundPriority \ + INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE( \ + scoped_may_load_library_at_background_priority)(FROM_HERE); \ + { \ + /* Thread-safe static local variable initialization ensures that */ \ + /* OnScopeEntered() is only invoked the first time that this is */ \ + /* encountered. 
*/ \ + static bool INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE(invoke_once) = \ + INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE( \ + scoped_may_load_library_at_background_priority) \ + .OnScopeEntered(); \ + ALLOW_UNUSED_LOCAL( \ + INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE(invoke_once)); \ + } + +// Like SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY, but raises the thread +// priority every time the scope is entered. Use this around code that may +// conditionally load a DLL each time it is executed, or which repeatedly loads +// and unloads DLLs. +#define SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY_REPEATEDLY() \ + base::internal::ScopedMayLoadLibraryAtBackgroundPriority \ + INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE( \ + scoped_may_load_library_at_background_priority)(FROM_HERE); \ + { \ + INTERNAL_SCOPED_THREAD_PRIORITY_APPEND_LINE( \ + scoped_may_load_library_at_background_priority) \ + .OnScopeEntered(); \ } namespace internal { @@ -64,13 +78,15 @@ class BASE_EXPORT ScopedMayLoadLibraryAtBackgroundPriority { explicit ScopedMayLoadLibraryAtBackgroundPriority(const Location& from_here); ~ScopedMayLoadLibraryAtBackgroundPriority(); - // The SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY() macro invokes this the - // first time that it is encountered. - bool OnScopeFirstEntered(); + // The SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY() macro invokes this + // the first time that it is encountered. The + // SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY_REPEATEDLY() macro invokes + // this every time it is encountered. + bool OnScopeEntered(); private: #if defined(OS_WIN) - // The original priority when invoking OnScopeFirstEntered(). + // The original priority when invoking OnScopeEntered(). base::Optional<ThreadPriority> original_thread_priority_; #endif diff --git a/chromium/base/threading/scoped_thread_priority_unittest.cc b/chromium/base/threading/scoped_thread_priority_unittest.cc index 7778e1fd0a9..aff6ce20702 100644 --- a/chromium/base/threading/scoped_thread_priority_unittest.cc +++ b/chromium/base/threading/scoped_thread_priority_unittest.cc @@ -35,6 +35,13 @@ void FunctionThatBoostsPriorityOnFirstInvoke(ThreadPriority expected_priority) { SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY(); EXPECT_EQ(expected_priority, PlatformThread::GetCurrentThreadPriority()); } + +void FunctionThatBoostsPriorityOnEveryInvoke() { + SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY_REPEATEDLY(); + EXPECT_EQ(base::ThreadPriority::NORMAL, + PlatformThread::GetCurrentThreadPriority()); +} + #endif // OS_WIN } // namespace @@ -110,6 +117,17 @@ TEST_F(ScopedThreadPriorityTest, FunctionThatBoostsPriorityOnFirstInvoke) { // Put back the default thread priority. PlatformThread::SetCurrentThreadPriority(ThreadPriority::NORMAL); } + +TEST_F(ScopedThreadPriorityTest, FunctionThatBoostsPriorityOnEveryInvoke) { + PlatformThread::SetCurrentThreadPriority(ThreadPriority::BACKGROUND); + + FunctionThatBoostsPriorityOnEveryInvoke(); + FunctionThatBoostsPriorityOnEveryInvoke(); + + // Put back the default thread priority. 
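The new _REPEATEDLY variant defined above differs from SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY() only in that it skips the once-per-call-site static and boosts the priority on every pass through the scope. A usage sketch (illustrative, not part of the patch; LoadPluginLibrary() is a hypothetical caller that may load a DLL each time it runs):

#include "base/files/file_path.h"
#include "base/native_library.h"
#include "base/threading/scoped_thread_priority.h"

// Hypothetical helper that may trigger a DLL load on every invocation, so the
// priority boost must also apply on every invocation, not just the first.
base::NativeLibrary LoadPluginLibrary(const base::FilePath& path) {
  SCOPED_MAY_LOAD_LIBRARY_AT_BACKGROUND_PRIORITY_REPEATEDLY();
  base::NativeLibraryLoadError error;
  return base::LoadNativeLibrary(path, &error);
}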
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::NORMAL); +} + #endif // OS_WIN } // namespace base diff --git a/chromium/base/threading/sequence_local_storage_map.cc b/chromium/base/threading/sequence_local_storage_map.cc index 2837aa03409..fbd863aadb6 100644 --- a/chromium/base/threading/sequence_local_storage_map.cc +++ b/chromium/base/threading/sequence_local_storage_map.cc @@ -6,8 +6,8 @@ #include <utility> +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/threading/thread_local.h" namespace base { diff --git a/chromium/base/threading/sequence_local_storage_slot.cc b/chromium/base/threading/sequence_local_storage_slot.cc index 5ae1d9f47ae..aca57ee7926 100644 --- a/chromium/base/threading/sequence_local_storage_slot.cc +++ b/chromium/base/threading/sequence_local_storage_slot.cc @@ -7,7 +7,7 @@ #include <limits> #include "base/atomic_sequence_num.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace internal { diff --git a/chromium/base/threading/sequenced_task_runner_handle.cc b/chromium/base/threading/sequenced_task_runner_handle.cc index 2bfc0cf42ee..4f329a0acb5 100644 --- a/chromium/base/threading/sequenced_task_runner_handle.cc +++ b/chromium/base/threading/sequenced_task_runner_handle.cc @@ -6,8 +6,8 @@ #include <utility> +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/threading/thread_local.h" namespace base { diff --git a/chromium/base/threading/simple_thread.cc b/chromium/base/threading/simple_thread.cc index 2477bc76865..b76f8a016b7 100644 --- a/chromium/base/threading/simple_thread.cc +++ b/chromium/base/threading/simple_thread.cc @@ -4,7 +4,7 @@ #include "base/threading/simple_thread.h" -#include "base/logging.h" +#include "base/check.h" #include "base/strings/string_number_conversions.h" #include "base/threading/platform_thread.h" #include "base/threading/thread_restrictions.h" diff --git a/chromium/base/threading/thread_checker_impl.cc b/chromium/base/threading/thread_checker_impl.cc index 18d5ea32de4..f282acfec81 100644 --- a/chromium/base/threading/thread_checker_impl.cc +++ b/chromium/base/threading/thread_checker_impl.cc @@ -4,7 +4,7 @@ #include "base/threading/thread_checker_impl.h" -#include "base/logging.h" +#include "base/check.h" #include "base/threading/thread_local.h" #include "base/threading/thread_task_runner_handle.h" diff --git a/chromium/base/threading/thread_collision_warner.cc b/chromium/base/threading/thread_collision_warner.cc index 547e11ca66f..d6d87094818 100644 --- a/chromium/base/threading/thread_collision_warner.cc +++ b/chromium/base/threading/thread_collision_warner.cc @@ -4,7 +4,7 @@ #include "base/threading/thread_collision_warner.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/threading/platform_thread.h" namespace base { diff --git a/chromium/base/threading/thread_id_name_manager.cc b/chromium/base/threading/thread_id_name_manager.cc index ba2f9b41cb1..8abaa40722c 100644 --- a/chromium/base/threading/thread_id_name_manager.cc +++ b/chromium/base/threading/thread_id_name_manager.cc @@ -7,7 +7,7 @@ #include <stdlib.h> #include <string.h> -#include "base/logging.h" +#include "base/check.h" #include "base/memory/singleton.h" #include "base/no_destructor.h" #include "base/stl_util.h" diff --git a/chromium/base/threading/thread_local_internal.h b/chromium/base/threading/thread_local_internal.h index 6f7fdc97684..184b2d3e850 100644 --- 
a/chromium/base/threading/thread_local_internal.h +++ b/chromium/base/threading/thread_local_internal.h @@ -9,6 +9,7 @@ #include <atomic> #include <memory> +#include <ostream> #include "base/macros.h" #include "base/threading/thread_local_storage.h" diff --git a/chromium/base/threading/thread_local_storage.cc b/chromium/base/threading/thread_local_storage.cc index 204f34c2721..efbd1b29a55 100644 --- a/chromium/base/threading/thread_local_storage.cc +++ b/chromium/base/threading/thread_local_storage.cc @@ -5,9 +5,10 @@ #include "base/threading/thread_local_storage.h" #include "base/atomicops.h" +#include "base/check_op.h" #include "base/compiler_specific.h" -#include "base/logging.h" #include "base/no_destructor.h" +#include "base/notreached.h" #include "base/synchronization/lock.h" #include "build/build_config.h" diff --git a/chromium/base/threading/thread_local_storage_posix.cc b/chromium/base/threading/thread_local_storage_posix.cc index 89edeee1d2a..1a7d07641b8 100644 --- a/chromium/base/threading/thread_local_storage_posix.cc +++ b/chromium/base/threading/thread_local_storage_posix.cc @@ -4,7 +4,7 @@ #include "base/threading/thread_local_storage.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { diff --git a/chromium/base/threading/thread_local_storage_win.cc b/chromium/base/threading/thread_local_storage_win.cc index a9aec31da53..106214d3e4b 100644 --- a/chromium/base/threading/thread_local_storage_win.cc +++ b/chromium/base/threading/thread_local_storage_win.cc @@ -6,7 +6,7 @@ #include <windows.h> -#include "base/logging.h" +#include "base/check.h" namespace base { diff --git a/chromium/base/threading/thread_local_unittest.cc b/chromium/base/threading/thread_local_unittest.cc index 47f4b54ccc6..ac0c7410943 100644 --- a/chromium/base/threading/thread_local_unittest.cc +++ b/chromium/base/threading/thread_local_unittest.cc @@ -3,7 +3,7 @@ // found in the LICENSE file. #include "base/threading/thread_local.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/macros.h" #include "base/optional.h" #include "base/synchronization/waitable_event.h" diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc index 7a94f7ccc5e..c7f07d26646 100644 --- a/chromium/base/threading/thread_restrictions.cc +++ b/chromium/base/threading/thread_restrictions.cc @@ -8,9 +8,9 @@ #if DCHECK_IS_ON() +#include "base/check_op.h" #include "base/debug/stack_trace.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/threading/thread_local.h" #include "build/build_config.h" diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h index 5f030902af5..33df64f30e8 100644 --- a/chromium/base/threading/thread_restrictions.h +++ b/chromium/base/threading/thread_restrictions.h @@ -96,6 +96,7 @@ // that's okay. 
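Most of the hunks above, and many below, make the same mechanical change: the broad "base/logging.h" include is swapped for the narrower assertion headers a file actually needs. As a rough, illustrative sketch of the resulting header-to-macro mapping (not code from the patch):

  #include "base/check.h"       // CHECK(), DCHECK()
  #include "base/check_op.h"    // CHECK_EQ(), DCHECK_NE(), ...
  #include "base/notreached.h"  // NOTREACHED()

  int Sign(int v) {
    DCHECK_NE(v, 0);  // comparison assertions come from check_op.h
    if (v > 0)
      return 1;
    if (v < 0)
      return -1;
    NOTREACHED();  // unreachable-code marker comes from notreached.h
    return 0;
  }

Files that still emit LOG() statements keep "base/logging.h", which is presumably why tools_sanity_unittest.cc gains that include further down.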
class BrowserProcessImpl; +class ChromeNSSCryptoModuleDelegate; class HistogramSynchronizer; class KeyStorageLinux; class NativeBackendKWallet; @@ -147,6 +148,7 @@ class CategorizedWorkerPool; class DesktopCaptureDevice; class InProcessUtilityThread; class NestedMessagePumpAndroid; +class PepperPrintSettingsManagerImpl; class RenderProcessHostImpl; class RenderWidgetHostViewMac; class RTCVideoDecoder; @@ -158,6 +160,7 @@ class SynchronousCompositor; class SynchronousCompositorHost; class SynchronousCompositorSyncCallBridge; class TextInputClientMac; +class WaitForProcessesToDumpProfilingInfo; class WebContentsViewMac; } // namespace content namespace cronet { @@ -206,6 +209,7 @@ class ScopedIPCSupport; } } namespace printing { +class LocalPrinterHandlerDefault; class PrintJobWorker; class PrinterQuery; } @@ -225,7 +229,7 @@ class GpuState; } namespace weblayer { class BrowserContextImpl; -class BrowserProcess; +class ContentBrowserClientImpl; class ProfileImpl; class WebLayerPathProvider; } @@ -308,6 +312,7 @@ class ScopedAllowThreadRecallForStackSamplingProfiler; class SimpleThread; class StackSamplingProfiler; class Thread; +class WaitableEvent; bool PathProviderWin(int, FilePath*); @@ -364,6 +369,7 @@ class BASE_EXPORT ScopedAllowBlocking { friend class android_webview::ScopedAllowInitGLBindings; friend class chromeos::MojoUtils; // http://crbug.com/1055467 friend class content::BrowserProcessSubThread; + friend class content::PepperPrintSettingsManagerImpl; friend class content::RenderProcessHostImpl; friend class content::RenderWidgetHostViewMac; // http://crbug.com/121917 friend class content::WebContentsViewMac; @@ -372,11 +378,12 @@ class BASE_EXPORT ScopedAllowBlocking { friend class memory_instrumentation::OSMetrics; friend class module_installer::ScopedAllowModulePakLoad; friend class mojo::CoreLibraryInitializer; + friend class printing::LocalPrinterHandlerDefault; friend class printing::PrintJobWorker; friend class resource_coordinator::TabManagerDelegate; // crbug.com/778703 friend class web::WebSubThread; friend class weblayer::BrowserContextImpl; - friend class weblayer::BrowserProcess; + friend class weblayer::ContentBrowserClientImpl; friend class weblayer::ProfileImpl; friend class weblayer::WebLayerPathProvider; @@ -422,6 +429,7 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitives { // Allowed usage: friend class SimpleThread; + friend class ::ChromeNSSCryptoModuleDelegate; friend class base::GetAppOutputScopedAllowBaseSyncPrimitives; friend class blink::SourceStream; friend class blink::WorkerThread; @@ -449,7 +457,6 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitives { friend class ::NativeBackendKWallet; // http://crbug.com/125331 friend class ::chromeos::system:: StatisticsProviderImpl; // http://crbug.com/125385 - friend class content::TextInputClientMac; // http://crbug.com/121917 friend class blink::VideoFrameResourceProvider; // http://crbug.com/878070 ScopedAllowBaseSyncPrimitives() EMPTY_BODY_IF_DCHECK_IS_OFF; @@ -500,6 +507,7 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope { friend class content::SynchronousCompositor; friend class content::SynchronousCompositorHost; friend class content::SynchronousCompositorSyncCallBridge; + friend class content::WaitForProcessesToDumpProfilingInfo; friend class media::AudioInputDevice; friend class media::AudioOutputDevice; friend class media::PaintCanvasVideoRenderer; @@ -533,6 +541,7 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope { // Not used in production yet, 
https://crbug.com/844078. friend class service_manager::ServiceProcessLauncher; friend class ui::WindowResizeHelperMac; // http://crbug.com/902829 + friend class content::TextInputClientMac; // http://crbug.com/121917 ScopedAllowBaseSyncPrimitivesOutsideBlockingScope( const Location& from_here = Location::Current()); diff --git a/chromium/base/threading/thread_task_runner_handle.cc b/chromium/base/threading/thread_task_runner_handle.cc index 4a9ac88f61c..ca57b36de2c 100644 --- a/chromium/base/threading/thread_task_runner_handle.cc +++ b/chromium/base/threading/thread_task_runner_handle.cc @@ -8,8 +8,8 @@ #include <utility> #include "base/bind.h" +#include "base/check_op.h" #include "base/lazy_instance.h" -#include "base/logging.h" #include "base/run_loop.h" #include "base/threading/thread_local.h" diff --git a/chromium/base/time/time_conversion_posix.cc b/chromium/base/time/time_conversion_posix.cc index ba0a2b29664..7d6c0a90144 100644 --- a/chromium/base/time/time_conversion_posix.cc +++ b/chromium/base/time/time_conversion_posix.cc @@ -10,7 +10,7 @@ #include <limits> -#include "base/logging.h" +#include "base/check_op.h" namespace base { diff --git a/chromium/base/time/time_exploded_icu.cc b/chromium/base/time/time_exploded_icu.cc index 620369b877f..41832e53912 100644 --- a/chromium/base/time/time_exploded_icu.cc +++ b/chromium/base/time/time_exploded_icu.cc @@ -6,7 +6,7 @@ #include <memory> -#include "base/logging.h" +#include "base/check.h" #include "base/memory/ptr_util.h" #include "third_party/icu/source/i18n/unicode/calendar.h" #include "third_party/icu/source/i18n/unicode/timezone.h" diff --git a/chromium/base/time/time_now_posix.cc b/chromium/base/time/time_now_posix.cc index 4ce93c08119..a7fdb62747f 100644 --- a/chromium/base/time/time_now_posix.cc +++ b/chromium/base/time/time_now_posix.cc @@ -12,7 +12,8 @@ #endif #include <unistd.h> -#include "base/logging.h" +#include "base/check.h" +#include "base/notreached.h" #include "base/numerics/safe_math.h" #include "base/time/time_override.h" #include "build/build_config.h" diff --git a/chromium/base/time/time_override.h b/chromium/base/time/time_override.h index ad3180c62a9..5d5bcd96b23 100644 --- a/chromium/base/time/time_override.h +++ b/chromium/base/time/time_override.h @@ -6,6 +6,7 @@ #define BASE_TIME_TIME_OVERRIDE_H_ #include "base/base_export.h" +#include "base/macros.h" #include "base/time/time.h" namespace base { diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc index 27a8ae19b51..dd9462cf08a 100644 --- a/chromium/base/time/time_unittest.cc +++ b/chromium/base/time/time_unittest.cc @@ -10,8 +10,8 @@ #include <string> #include "base/build_time.h" +#include "base/check_op.h" #include "base/compiler_specific.h" -#include "base/logging.h" #include "base/stl_util.h" #include "base/strings/stringprintf.h" #include "base/test/gtest_util.h" diff --git a/chromium/base/time/time_win.cc b/chromium/base/time/time_win.cc index c1976e64a6d..7c13f8783b2 100644 --- a/chromium/base/time/time_win.cc +++ b/chromium/base/time/time_win.cc @@ -40,9 +40,10 @@ #include "base/atomicops.h" #include "base/bit_cast.h" +#include "base/check_op.h" #include "base/cpu.h" #include "base/feature_list.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/synchronization/lock.h" #include "base/threading/platform_thread.h" #include "base/time/time_override.h" diff --git a/chromium/base/timer/lap_timer.cc b/chromium/base/timer/lap_timer.cc index 3ff2465496f..c569ee848ac 100644 --- 
a/chromium/base/timer/lap_timer.cc +++ b/chromium/base/timer/lap_timer.cc @@ -3,7 +3,7 @@ // found in the LICENSE file. #include "base/timer/lap_timer.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { diff --git a/chromium/base/timer/timer.cc b/chromium/base/timer/timer.cc index 815bc119573..d00ea97049b 100644 --- a/chromium/base/timer/timer.cc +++ b/chromium/base/timer/timer.cc @@ -8,7 +8,7 @@ #include <utility> -#include "base/logging.h" +#include "base/check.h" #include "base/memory/ptr_util.h" #include "base/memory/ref_counted.h" #include "base/threading/platform_thread.h" diff --git a/chromium/base/tools_sanity_unittest.cc b/chromium/base/tools_sanity_unittest.cc index 2ad89ac4c4b..2d2c78594e7 100644 --- a/chromium/base/tools_sanity_unittest.cc +++ b/chromium/base/tools_sanity_unittest.cc @@ -12,6 +12,7 @@ #include "base/cfi_buildflags.h" #include "base/debug/asan_invalid_access.h" #include "base/debug/profiler.h" +#include "base/logging.h" #include "base/sanitizer_buildflags.h" #include "base/third_party/dynamic_annotations/dynamic_annotations.h" #include "base/threading/thread.h" diff --git a/chromium/base/trace_event/blame_context.cc b/chromium/base/trace_event/blame_context.cc index e7599efa83f..a12f2354a13 100644 --- a/chromium/base/trace_event/blame_context.cc +++ b/chromium/base/trace_event/blame_context.cc @@ -4,6 +4,8 @@ #include "base/trace_event/blame_context.h" +#include <cstring> + #include "base/strings/stringprintf.h" #include "base/trace_event/trace_event.h" #include "base/trace_event/traced_value.h" diff --git a/chromium/base/trace_event/builtin_categories.h b/chromium/base/trace_event/builtin_categories.h index ac67c35bbcb..7ce21d4711b 100644 --- a/chromium/base/trace_event/builtin_categories.h +++ b/chromium/base/trace_event/builtin_categories.h @@ -30,6 +30,7 @@ X("accessibility") \ X("AccountFetcherService") \ X("android_webview") \ + X("aogh") \ X("audio") \ X("base") \ X("benchmark") \ @@ -46,8 +47,13 @@ X("browser") \ X("browsing_data") \ X("CacheStorage") \ + X("Calculators") \ + X("CameraStream") \ X("camera") \ + X("cast_app") \ X("cast_perf_test") \ + X("cast.mdns") \ + X("cast.mdns.socket") \ X("cast.stream") \ X("cc") \ X("cc.debug") \ @@ -67,7 +73,6 @@ X("drmcursor") \ X("dwrite") \ X("DXVA Decoding") \ - X("EarlyJava") \ X("evdev") \ X("event") \ X("exo") \ @@ -111,9 +116,12 @@ X("passwords") \ X("p2p") \ X("page-serialization") \ + X("paint_preview") \ X("pepper") \ + X("PlatformMalloc") \ X("ppapi") \ X("ppapi proxy") \ + X("print") \ X("rail") \ X("renderer") \ X("renderer_host") \ @@ -131,6 +139,8 @@ X("SiteEngagement") \ X("skia") \ X("sql") \ + X("stadia_media") \ + X("stadia_rtc") \ X("startup") \ X("sync") \ X("sync_lock_contention") \ @@ -153,6 +163,7 @@ X("webrtc") \ X("xr") \ X(TRACE_DISABLED_BY_DEFAULT("animation-worklet")) \ + X(TRACE_DISABLED_BY_DEFAULT("audio")) \ X(TRACE_DISABLED_BY_DEFAULT("audio-worklet")) \ X(TRACE_DISABLED_BY_DEFAULT("blink.debug")) \ X(TRACE_DISABLED_BY_DEFAULT("blink.debug.display_lock")) \ @@ -187,12 +198,14 @@ X(TRACE_DISABLED_BY_DEFAULT("gpu.decoder")) \ X(TRACE_DISABLED_BY_DEFAULT("gpu.device")) \ X(TRACE_DISABLED_BY_DEFAULT("gpu.service")) \ + X(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma")) \ X(TRACE_DISABLED_BY_DEFAULT("histogram_samples")) \ X(TRACE_DISABLED_BY_DEFAULT("java-heap-profiler")) \ X(TRACE_DISABLED_BY_DEFAULT("layer-element")) \ X(TRACE_DISABLED_BY_DEFAULT("layout_shift.debug")) \ X(TRACE_DISABLED_BY_DEFAULT("lifecycles")) \ X(TRACE_DISABLED_BY_DEFAULT("loading")) \ + 
X(TRACE_DISABLED_BY_DEFAULT("mediastream")) \ X(TRACE_DISABLED_BY_DEFAULT("memory-infra")) \ X(TRACE_DISABLED_BY_DEFAULT("memory-infra.v8.code_stats")) \ X(TRACE_DISABLED_BY_DEFAULT("net")) \ diff --git a/chromium/base/trace_event/category_registry.cc b/chromium/base/trace_event/category_registry.cc index 3002f49f51b..691336f8707 100644 --- a/chromium/base/trace_event/category_registry.cc +++ b/chromium/base/trace_event/category_registry.cc @@ -8,8 +8,9 @@ #include <type_traits> +#include "base/check.h" #include "base/debug/leak_annotations.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/third_party/dynamic_annotations/dynamic_annotations.h" namespace base { diff --git a/chromium/base/trace_event/common/trace_event_common.h b/chromium/base/trace_event/common/trace_event_common.h index a7bffbdbeb4..28b7275345c 100644 --- a/chromium/base/trace_event/common/trace_event_common.h +++ b/chromium/base/trace_event/common/trace_event_common.h @@ -837,6 +837,14 @@ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) #define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \ category_group, name, id, timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ diff --git a/chromium/base/trace_event/cpufreq_monitor_android_unittest.cc b/chromium/base/trace_event/cpufreq_monitor_android_unittest.cc index 45c3d0f0560..f6939522233 100644 --- a/chromium/base/trace_event/cpufreq_monitor_android_unittest.cc +++ b/chromium/base/trace_event/cpufreq_monitor_android_unittest.cc @@ -151,8 +151,7 @@ class CPUFreqMonitorTest : public testing::Test { std::string file_path = delegate_->GetScalingCurFreqPathString(pair.first); std::string str_freq = base::StringPrintf("%d\n", pair.second); - base::WriteFile(base::FilePath(file_path), str_freq.c_str(), - str_freq.length()); + base::WriteFile(base::FilePath(file_path), str_freq); } } @@ -160,8 +159,7 @@ class CPUFreqMonitorTest : public testing::Test { const std::vector<std::string>& related_cpus) { for (unsigned int i = 0; i < clusters.size(); i++) { base::WriteFile(base::FilePath(delegate_->GetRelatedCPUsPathString(i)), - related_cpus[clusters[i]].c_str(), - related_cpus[clusters[i]].length()); + related_cpus[clusters[i]]); } } diff --git a/chromium/base/trace_event/etw_manifest/BUILD.gn b/chromium/base/trace_event/etw_manifest/BUILD.gn index dab9e6fb109..a66fef9e3c8 100644 --- a/chromium/base/trace_event/etw_manifest/BUILD.gn +++ b/chromium/base/trace_event/etw_manifest/BUILD.gn @@ -9,7 +9,7 @@ assert(is_win, "This only runs on Windows.") message_compiler("chrome_events_win") { visibility = [ "//base/*", - "//chrome:main_dll", + "//chrome:chrome_dll", ] sources = [ "chrome_events_win.man" ] diff --git a/chromium/base/trace_event/features.gni b/chromium/base/trace_event/features.gni new file mode 100644 index 00000000000..7d6bb2a8509 --- /dev/null +++ b/chromium/base/trace_event/features.gni @@ -0,0 +1,12 @@ +# Copyright 2020 The Chromium Authors. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Features used by //base/trace_event and //services/tracing. +declare_args() { + # Switches the TRACE_EVENT instrumentation from base's TraceLog implementation + # to //third_party/perfetto's client library. Not implemented yet, currently a + # no-op to set up trybot infrastructure. + # TODO(eseckler): Implement. + use_perfetto_client_library = false +} diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc index ef48f68f343..8e8bec8abb4 100644 --- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc +++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc @@ -4,14 +4,18 @@ #include "base/trace_event/heap_profiler_allocation_context_tracker.h" +#include <string.h> + #include <algorithm> #include <iterator> #include "base/atomicops.h" +#include "base/check_op.h" #include "base/debug/debugging_buildflags.h" #include "base/debug/leak_annotations.h" #include "base/debug/stack_trace.h" #include "base/no_destructor.h" +#include "base/notreached.h" #include "base/stl_util.h" #include "base/threading/platform_thread.h" #include "base/threading/thread_local_storage.h" diff --git a/chromium/base/trace_event/log_message.cc b/chromium/base/trace_event/log_message.cc index e6b02e44902..de8ee71b4d9 100644 --- a/chromium/base/trace_event/log_message.cc +++ b/chromium/base/trace_event/log_message.cc @@ -31,7 +31,7 @@ void LogMessage::AppendAsTraceFormat(std::string* out) const { void LogMessage::EstimateTraceMemoryOverhead( TraceEventMemoryOverhead* overhead) { - overhead->Add(TraceEventMemoryOverhead::kOther, sizeof(this)); + overhead->Add(TraceEventMemoryOverhead::kOther, sizeof(*this)); overhead->AddString(message_); } @@ -44,4 +44,4 @@ bool LogMessage::AppendToProto(ProtoAppender* appender) { } } // namespace trace_event -} // namespace base
\ No newline at end of file +} // namespace base diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc index 8be3c324047..1af450d8a47 100644 --- a/chromium/base/trace_event/memory_dump_request_args.cc +++ b/chromium/base/trace_event/memory_dump_request_args.cc @@ -4,7 +4,7 @@ #include "base/trace_event/memory_dump_request_args.h" -#include "base/logging.h" +#include "base/notreached.h" namespace base { namespace trace_event { diff --git a/chromium/base/trace_event/memory_dump_scheduler.cc b/chromium/base/trace_event/memory_dump_scheduler.cc index 8b03f5c90ba..ac9b12bae6a 100644 --- a/chromium/base/trace_event/memory_dump_scheduler.cc +++ b/chromium/base/trace_event/memory_dump_scheduler.cc @@ -8,7 +8,7 @@ #include <limits> #include "base/bind.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/threading/sequenced_task_runner_handle.h" namespace base { diff --git a/chromium/base/trace_event/memory_infra_background_allowlist.cc b/chromium/base/trace_event/memory_infra_background_allowlist.cc index f9a894eeb3a..7d1e5744c15 100644 --- a/chromium/base/trace_event/memory_infra_background_allowlist.cc +++ b/chromium/base/trace_event/memory_infra_background_allowlist.cc @@ -77,6 +77,7 @@ const char* const kAllocatorDumpNameAllowlist[] = { "blink_gc/main/heap", "blink_gc/workers/heap/worker_0x?", "blink_objects/AdSubframe", + "blink_objects/ArrayBufferContents", "blink_objects/AudioHandler", "blink_objects/ContextLifecycleStateObserver", "blink_objects/DetachedScriptState", diff --git a/chromium/base/trace_event/trace_arguments.cc b/chromium/base/trace_event/trace_arguments.cc index d3f71247934..0f283e00527 100644 --- a/chromium/base/trace_event/trace_arguments.cc +++ b/chromium/base/trace_event/trace_arguments.cc @@ -11,8 +11,9 @@ #include <cmath> +#include "base/check_op.h" #include "base/json/string_escape.h" -#include "base/logging.h" +#include "base/notreached.h" #include "base/stl_util.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_util.h" diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h index 4ab6b00702f..3e30dcd781a 100644 --- a/chromium/base/trace_event/trace_event.h +++ b/chromium/base/trace_event/trace_event.h @@ -157,19 +157,6 @@ #define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \ trace_event_internal::UpdateTraceEventDuration -// Set the duration field of a COMPLETE trace event. -// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT( -// const unsigned char* category_group_enabled, -// const char* name, -// base::trace_event::TraceEventHandle id, -// int thread_id, -// bool explicit_timestamps, -// const base::TimeTicks& now, -// const base::ThreadTicks& thread_now, -// base::trace_event::ThreadInstructionCount thread_instruction_now) -#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT \ - trace_event_internal::UpdateTraceEventDurationExplicit - // Adds a metadata event to the trace log. The |AppendValueAsTraceFormat| method // on the convertable value will be called at flush time. // TRACE_EVENT_API_ADD_METADATA_EVENT( @@ -371,8 +358,8 @@ // Implementation detail: internal macro to create static category and add // event if the category is enabled. #define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMPS( \ - category_group, name, id, thread_id, begin_timestamp, end_timestamp, \ - thread_end_timestamp, flags, ...) 
\ + phase, category_group, name, id, thread_id, timestamp, thread_timestamp, \ + flags) \ do { \ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \ @@ -381,17 +368,11 @@ flags | trace_event_trace_id.id_flags(); \ const unsigned char* uid_category_group_enabled = \ INTERNAL_TRACE_EVENT_UID(category_group_enabled); \ - auto handle = \ - trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \ - TRACE_EVENT_PHASE_COMPLETE, uid_category_group_enabled, name, \ - trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \ - thread_id, begin_timestamp, \ - trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \ - trace_event_internal::kNoId, ##__VA_ARGS__); \ - TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT( \ - uid_category_group_enabled, name, handle, thread_id, \ - /*explicit_timestamps=*/true, end_timestamp, thread_end_timestamp, \ - base::trace_event::ThreadInstructionCount()); \ + trace_event_internal::AddTraceEventWithThreadIdAndTimestamps( \ + phase, uid_category_group_enabled, name, \ + trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \ + thread_id, timestamp, thread_timestamp, \ + trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP); \ } \ } while (0) @@ -599,6 +580,18 @@ AddTraceEventWithThreadIdAndTimestamp( base::trace_event::TraceArguments* args, unsigned int flags); +base::trace_event::TraceEventHandle BASE_EXPORT +AddTraceEventWithThreadIdAndTimestamps( + char phase, + const unsigned char* category_group_enabled, + const char* name, + const char* scope, + unsigned long long id, + int thread_id, + const base::TimeTicks& timestamp, + const base::ThreadTicks& thread_timestamp, + unsigned int flags); + void BASE_EXPORT AddMetadataEvent(const unsigned char* category_group_enabled, const char* name, base::trace_event::TraceArguments* args, diff --git a/chromium/base/trace_event/trace_event_etw_export_win.cc b/chromium/base/trace_event/trace_event_etw_export_win.cc index bf7d6f1018b..680cdbdc027 100644 --- a/chromium/base/trace_event/trace_event_etw_export_win.cc +++ b/chromium/base/trace_event/trace_event_etw_export_win.cc @@ -7,8 +7,8 @@ #include <stddef.h> #include "base/at_exit.h" +#include "base/check_op.h" #include "base/command_line.h" -#include "base/logging.h" #include "base/memory/singleton.h" #include "base/strings/string_tokenizer.h" #include "base/strings/utf_string_conversions.h" diff --git a/chromium/base/trace_event/trace_event_filter_test_utils.cc b/chromium/base/trace_event/trace_event_filter_test_utils.cc index 85b4cfa2768..79663072eb1 100644 --- a/chromium/base/trace_event/trace_event_filter_test_utils.cc +++ b/chromium/base/trace_event/trace_event_filter_test_utils.cc @@ -4,7 +4,7 @@ #include "base/trace_event/trace_event_filter_test_utils.h" -#include "base/logging.h" +#include "base/check.h" namespace base { namespace trace_event { diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc index f7e1761f3fb..c0d85ea90f1 100644 --- a/chromium/base/trace_event/trace_log.cc +++ b/chromium/base/trace_event/trace_log.cc @@ -118,8 +118,8 @@ void InitializeMetadataEvent(TraceEvent* trace_event, TraceArguments args(arg_name, value); base::TimeTicks now = TRACE_TIME_TICKS_NOW(); - ThreadTicks thread_now = ThreadNow(); - ThreadInstructionCount thread_instruction_count = ThreadInstructionNow(); + ThreadTicks thread_now; + ThreadInstructionCount thread_instruction_count; trace_event->Reset(thread_id, now, thread_now, thread_instruction_count, 
TRACE_EVENT_PHASE_METADATA, CategoryRegistry::kCategoryMetadata->state_ptr(), @@ -1226,6 +1226,32 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( int thread_id, const TimeTicks& timestamp, TraceArguments* args, + unsigned int flags) { + ThreadTicks thread_now; + // If timestamp is provided explicitly, don't record thread time as it would + // be for the wrong timestamp. Similarly, if we record an event for another + // process or thread, we shouldn't report the current thread's thread time. + if (!(flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP || + flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID || + thread_id != static_cast<int>(PlatformThread::CurrentId()))) { + thread_now = ThreadNow(); + } + return AddTraceEventWithThreadIdAndTimestamps( + phase, category_group_enabled, name, scope, id, bind_id, thread_id, + timestamp, thread_now, args, flags); +} + +TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamps( + char phase, + const unsigned char* category_group_enabled, + const char* name, + const char* scope, + unsigned long long id, + unsigned long long bind_id, + int thread_id, + const TimeTicks& timestamp, + const ThreadTicks& thread_timestamp, + TraceArguments* args, unsigned int flags) NO_THREAD_SAFETY_ANALYSIS { TraceEventHandle handle = {0, 0, 0}; if (!ShouldAddAfterUpdatingState(phase, category_group_enabled, name, id, @@ -1244,8 +1270,16 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( bind_id = MangleEventId(bind_id); TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp); - ThreadTicks thread_now = ThreadNow(); - ThreadInstructionCount thread_instruction_now = ThreadInstructionNow(); + ThreadInstructionCount thread_instruction_now; + // If timestamp is provided explicitly, don't record thread instruction count + // as it would be for the wrong timestamp. Similarly, if we record an event + // for another process or thread, we shouldn't report the current thread's + // thread time. 
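The same condition guards the thread-time sample above and the thread-instruction-count sample just below. Pulled out as a standalone predicate it reads as follows; the helper name is hypothetical and the sketch only restates the patch's own check:

  #include "base/threading/platform_thread.h"
  #include "base/trace_event/trace_event.h"  // TRACE_EVENT_FLAG_* bits

  bool ShouldSampleCurrentThreadClocks(unsigned int flags, int thread_id) {
    // Sample thread time / instruction counts only for events that describe
    // the current thread at the current moment.
    return !(flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ||
             flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID ||
             thread_id != static_cast<int>(base::PlatformThread::CurrentId()));
  }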
+ if (!(flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP || + flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID || + thread_id != static_cast<int>(PlatformThread::CurrentId()))) { + thread_instruction_now = ThreadInstructionNow(); + } ThreadLocalEventBuffer* thread_local_event_buffer = nullptr; if (*category_group_enabled & RECORDING_MODE) { @@ -1259,9 +1293,10 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( auto trace_event_override = add_trace_event_override_.load(std::memory_order_relaxed); if (trace_event_override) { - TraceEvent new_trace_event( - thread_id, offset_event_timestamp, thread_now, thread_instruction_now, - phase, category_group_enabled, name, scope, id, bind_id, args, flags); + TraceEvent new_trace_event(thread_id, offset_event_timestamp, + thread_timestamp, thread_instruction_now, + phase, category_group_enabled, name, scope, id, + bind_id, args, flags); trace_event_override( &new_trace_event, @@ -1275,8 +1310,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( bool disabled_by_filters = false; if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) { auto new_trace_event = std::make_unique<TraceEvent>( - thread_id, offset_event_timestamp, thread_now, thread_instruction_now, - phase, category_group_enabled, name, scope, id, bind_id, args, flags); + thread_id, offset_event_timestamp, thread_timestamp, + thread_instruction_now, phase, category_group_enabled, name, scope, id, + bind_id, args, flags); disabled_by_filters = true; ForEachCategoryFilter( @@ -1308,7 +1344,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp( if (filtered_trace_event) { *trace_event = std::move(*filtered_trace_event); } else { - trace_event->Reset(thread_id, offset_event_timestamp, thread_now, + trace_event->Reset(thread_id, offset_event_timestamp, thread_timestamp, thread_instruction_now, phase, category_group_enabled, name, scope, id, bind_id, args, flags); @@ -1818,6 +1854,23 @@ base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp( timestamp, args, flags); } +base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamps( + char phase, + const unsigned char* category_group_enabled, + const char* name, + const char* scope, + unsigned long long id, + int thread_id, + const base::TimeTicks& timestamp, + const base::ThreadTicks& thread_timestamp, + unsigned int flags) { + return base::trace_event::TraceLog::GetInstance() + ->AddTraceEventWithThreadIdAndTimestamps( + phase, category_group_enabled, name, scope, id, + /*bind_id=*/trace_event_internal::kNoId, thread_id, timestamp, + thread_timestamp, nullptr, flags); +} + void AddMetadataEvent(const unsigned char* category_group_enabled, const char* name, base::trace_event::TraceArguments* args, diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h index 978c72fe82d..4b0ef0f3ccb 100644 --- a/chromium/base/trace_event/trace_log.h +++ b/chromium/base/trace_event/trace_log.h @@ -287,6 +287,18 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider { const TimeTicks& timestamp, TraceArguments* args, unsigned int flags); + TraceEventHandle AddTraceEventWithThreadIdAndTimestamps( + char phase, + const unsigned char* category_group_enabled, + const char* name, + const char* scope, + unsigned long long id, + unsigned long long bind_id, + int thread_id, + const TimeTicks& timestamp, + const ThreadTicks& thread_timestamp, + TraceArguments* args, + unsigned int flags); // Adds a metadata event that will be written when the trace log is 
flushed. void AddMetadataEvent(const unsigned char* category_group_enabled, diff --git a/chromium/base/unguessable_token.cc b/chromium/base/unguessable_token.cc index 973b4167bd2..9f9b8b041f6 100644 --- a/chromium/base/unguessable_token.cc +++ b/chromium/base/unguessable_token.cc @@ -4,6 +4,8 @@ #include "base/unguessable_token.h" +#include <ostream> + #include "base/format_macros.h" #include "base/no_destructor.h" #include "base/rand_util.h" diff --git a/chromium/base/util/BUILD.gn b/chromium/base/util/BUILD.gn index 4b48fdfa180..d7a680b87fe 100644 --- a/chromium/base/util/BUILD.gn +++ b/chromium/base/util/BUILD.gn @@ -7,6 +7,7 @@ import("//testing/test.gni") test("base_util_unittests") { deps = [ "memory_pressure:unittests", + "timer:unittests", "type_safety:tests", "values:unittests", "//base/test:run_all_base_unittests", diff --git a/chromium/base/util/memory_pressure/fake_memory_pressure_monitor.cc b/chromium/base/util/memory_pressure/fake_memory_pressure_monitor.cc index b9273c5a7ac..bb3d8ec4bff 100644 --- a/chromium/base/util/memory_pressure/fake_memory_pressure_monitor.cc +++ b/chromium/base/util/memory_pressure/fake_memory_pressure_monitor.cc @@ -3,6 +3,7 @@ // found in the LICENSE file. #include "base/util/memory_pressure/fake_memory_pressure_monitor.h" +#include "base/logging.h" namespace util { namespace test { diff --git a/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc b/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc index eef7f7292ce..74f966381a4 100644 --- a/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc +++ b/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc @@ -5,7 +5,7 @@ #include "base/util/memory_pressure/multi_source_memory_pressure_monitor.h" #include "base/bind.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/metrics/histogram_functions.h" #include "base/metrics/histogram_macros.h" #include "base/time/time.h" diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc index 2cc13d2bfc0..f8b3791b2b0 100644 --- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc +++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc @@ -58,6 +58,9 @@ constexpr char kMinFilelist[] = "/proc/sys/vm/min_filelist_kbytes"; constexpr char kRamVsSwapWeight[] = "/sys/kernel/mm/chromeos-low_mem/ram_vs_swap_weight"; +// The extra free to trigger kernel memory reclaim earlier. +constexpr char kExtraFree[] = "/proc/sys/vm/extra_free_kbytes"; + // Converts an available memory value in MB to a memory pressure level. base::MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromAvailable(int available_mb, @@ -306,7 +309,13 @@ uint64_t SystemMemoryPressureEvaluator::GetReservedMemoryKB() { LOG(ERROR) << "Couldn't get /proc/zoneinfo"; return 0; } - return CalculateReservedFreeKB(file_contents); + + // Reserve free pages is high watermark + lowmem_reserve and extra_free_kbytes + // raises the high watermark. Nullify the effect of extra_free_kbytes by + // excluding it from the reserved pages. The default extra_free_kbytes value + // is 0 if the file couldn't be accessed. 
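The return statement just below performs that nullification by subtracting the extra_free_kbytes value from the watermark-based reserve. With made-up numbers, purely for illustration:

  #include <cstdint>

  // Hypothetical values in kB, not taken from the patch.
  const uint64_t watermarks_plus_lowmem = 80000;  // CalculateReservedFreeKB()
  const uint64_t extra_free = 8000;               // /proc/sys/vm/extra_free_kbytes
  const uint64_t reported_reserve = watermarks_plus_lowmem - extra_free;  // 72000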
+ return CalculateReservedFreeKB(file_contents) - + ReadFileToUint64(base::FilePath(kExtraFree)); } // CalculateAvailableMemoryUserSpaceKB implements the same available memory diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc index b7a8d72618c..10dd507b5fc 100644 --- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc +++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc @@ -26,10 +26,6 @@ namespace util { namespace chromeos { namespace { -bool SetFileContents(const base::FilePath& path, const std::string& contents) { - return static_cast<std::string::size_type>(base::WriteFile( - path, contents.c_str(), contents.size())) == contents.size(); -} // Since it would be very hard to mock sysfs instead we will send in our own // implementation of WaitForKernelNotification which instead will block on a @@ -68,6 +64,7 @@ void RunLoopRunWithTimeout(base::TimeDelta timeout) { timeout); run_loop.Run(); } + } // namespace class TestSystemMemoryPressureEvaluator : public SystemMemoryPressureEvaluator { @@ -113,14 +110,14 @@ TEST(ChromeOSSystemMemoryPressureEvaluatorTest, ParseMarginFileGood) { base::FilePath margin_file = tmp_dir.GetPath().Append("margin"); - ASSERT_TRUE(SetFileContents(margin_file, "123")); + ASSERT_TRUE(base::WriteFile(margin_file, "123")); const std::vector<int> parts1 = TestSystemMemoryPressureEvaluator::GetMarginFileParts( margin_file.value()); ASSERT_EQ(1u, parts1.size()); ASSERT_EQ(123, parts1[0]); - ASSERT_TRUE(SetFileContents(margin_file, "123 456")); + ASSERT_TRUE(base::WriteFile(margin_file, "123 456")); const std::vector<int> parts2 = TestSystemMemoryPressureEvaluator::GetMarginFileParts( margin_file.value()); @@ -135,19 +132,19 @@ TEST(ChromeOSSystemMemoryPressureEvaluatorTest, ParseMarginFileBad) { base::FilePath margin_file = tmp_dir.GetPath().Append("margin"); // An empty margin file is bad. - ASSERT_TRUE(SetFileContents(margin_file, "")); + ASSERT_TRUE(base::WriteFile(margin_file, "")); ASSERT_TRUE( TestSystemMemoryPressureEvaluator::GetMarginFileParts(margin_file.value()) .empty()); // The numbers will be in base10, so 4a6 would be invalid. - ASSERT_TRUE(SetFileContents(margin_file, "123 4a6")); + ASSERT_TRUE(base::WriteFile(margin_file, "123 4a6")); ASSERT_TRUE( TestSystemMemoryPressureEvaluator::GetMarginFileParts(margin_file.value()) .empty()); // The numbers must be integers. - ASSERT_TRUE(SetFileContents(margin_file, "123.2 412.3")); + ASSERT_TRUE(base::WriteFile(margin_file, "123.2 412.3")); ASSERT_TRUE( TestSystemMemoryPressureEvaluator::GetMarginFileParts(margin_file.value()) .empty()); @@ -238,11 +235,11 @@ TEST(ChromeOSSystemMemoryPressureEvaluatorTest, CheckMemoryPressure) { // Set the margin values to 500 (critical) and 1000 (moderate). const std::string kMarginContents = "500 1000"; - ASSERT_TRUE(SetFileContents(margin_file, kMarginContents)); + ASSERT_TRUE(base::WriteFile(margin_file, kMarginContents)); // Write the initial available contents. 
const std::string kInitialAvailableContents = "1500"; - ASSERT_TRUE(SetFileContents(available_file, kInitialAvailableContents)); + ASSERT_TRUE(base::WriteFile(available_file, kInitialAvailableContents)); base::test::TaskEnvironment task_environment( base::test::TaskEnvironment::MainThreadType::UI); @@ -286,7 +283,7 @@ TEST(ChromeOSSystemMemoryPressureEvaluatorTest, CheckMemoryPressure) { evaluator->current_vote()); // Moderate Pressure. - ASSERT_TRUE(SetFileContents(available_file, "900")); + ASSERT_TRUE(base::WriteFile(available_file, "900")); TriggerKernelNotification(write_end.get()); // TODO(bgeffon): Use RunLoop::QuitClosure() instead of relying on "spin for // 1 second". @@ -295,28 +292,28 @@ TEST(ChromeOSSystemMemoryPressureEvaluatorTest, CheckMemoryPressure) { evaluator->current_vote()); // Critical Pressure. - ASSERT_TRUE(SetFileContents(available_file, "450")); + ASSERT_TRUE(base::WriteFile(available_file, "450")); TriggerKernelNotification(write_end.get()); RunLoopRunWithTimeout(base::TimeDelta::FromSeconds(1)); ASSERT_EQ(base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, evaluator->current_vote()); // Moderate Pressure. - ASSERT_TRUE(SetFileContents(available_file, "550")); + ASSERT_TRUE(base::WriteFile(available_file, "550")); TriggerKernelNotification(write_end.get()); RunLoopRunWithTimeout(base::TimeDelta::FromSeconds(1)); ASSERT_EQ(base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, evaluator->current_vote()); // No pressure, note: this will not cause any event. - ASSERT_TRUE(SetFileContents(available_file, "1150")); + ASSERT_TRUE(base::WriteFile(available_file, "1150")); TriggerKernelNotification(write_end.get()); RunLoopRunWithTimeout(base::TimeDelta::FromSeconds(1)); ASSERT_EQ(base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE, evaluator->current_vote()); // Back into moderate. - ASSERT_TRUE(SetFileContents(available_file, "950")); + ASSERT_TRUE(base::WriteFile(available_file, "950")); TriggerKernelNotification(write_end.get()); RunLoopRunWithTimeout(base::TimeDelta::FromSeconds(1)); ASSERT_EQ(base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_mac.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_mac.cc index 3b7ab5eb3d3..91dc9486f1f 100644 --- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_mac.cc +++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_mac.cc @@ -13,7 +13,7 @@ #include <cmath> #include "base/bind.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/mac/mac_util.h" #include "base/memory/memory_pressure_monitor.h" #include "base/threading/sequenced_task_runner_handle.h" diff --git a/chromium/base/util/timer/BUILD.gn b/chromium/base/util/timer/BUILD.gn new file mode 100644 index 00000000000..e77d9494034 --- /dev/null +++ b/chromium/base/util/timer/BUILD.gn @@ -0,0 +1,23 @@ +# Copyright 2020 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +source_set("timer") { + public = [ "wall_clock_timer.h" ] + + sources = [ "wall_clock_timer.cc" ] + + deps = [ "//base:base" ] +} + +source_set("unittests") { + testonly = true + sources = [ "wall_clock_timer_unittest.cc" ] + + deps = [ + ":timer", + "//base/test:test_support", + "//testing/gmock", + "//testing/gtest", + ] +} diff --git a/chromium/base/util/timer/OWNERS b/chromium/base/util/timer/OWNERS new file mode 100644 index 00000000000..2e628c64419 --- /dev/null +++ b/chromium/base/util/timer/OWNERS @@ -0,0 +1,2 @@ +grt@chromium.org +zmin@chromium.org diff --git a/chromium/base/util/timer/wall_clock_timer.cc b/chromium/base/util/timer/wall_clock_timer.cc new file mode 100644 index 00000000000..5420ec7190b --- /dev/null +++ b/chromium/base/util/timer/wall_clock_timer.cc @@ -0,0 +1,76 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/util/timer/wall_clock_timer.h" + +#include <utility> + +#include "base/power_monitor/power_monitor.h" +#include "base/time/clock.h" +#include "base/time/default_clock.h" +#include "base/time/default_tick_clock.h" +#include "base/time/tick_clock.h" + +namespace util { + +WallClockTimer::WallClockTimer() = default; +WallClockTimer::WallClockTimer(const base::Clock* clock, + const base::TickClock* tick_clock) + : timer_(tick_clock), + clock_(clock ? clock : base::DefaultClock::GetInstance()) {} + +WallClockTimer::~WallClockTimer() { + RemoveObserver(); +} + +void WallClockTimer::Start(const base::Location& posted_from, + base::Time desired_run_time, + base::OnceClosure user_task) { + user_task_ = std::move(user_task); + posted_from_ = posted_from; + desired_run_time_ = desired_run_time; + AddObserver(); + timer_.Start(posted_from_, desired_run_time_ - Now(), this, + &WallClockTimer::RunUserTask); +} + +void WallClockTimer::Stop() { + timer_.Stop(); + user_task_.Reset(); + RemoveObserver(); +} + +bool WallClockTimer::IsRunning() const { + return timer_.IsRunning(); +} + +void WallClockTimer::OnResume() { + // This will actually restart timer with smaller delay + timer_.Start(posted_from_, desired_run_time_ - Now(), this, + &WallClockTimer::RunUserTask); +} + +void WallClockTimer::AddObserver() { + if (!observer_added_) + observer_added_ = base::PowerMonitor::AddObserver(this); +} + +void WallClockTimer::RemoveObserver() { + if (observer_added_) { + base::PowerMonitor::RemoveObserver(this); + observer_added_ = false; + } +} + +void WallClockTimer::RunUserTask() { + DCHECK(user_task_); + RemoveObserver(); + std::exchange(user_task_, {}).Run(); +} + +base::Time WallClockTimer::Now() const { + return clock_->Now(); +} + +} // namespace util diff --git a/chromium/base/util/timer/wall_clock_timer.h b/chromium/base/util/timer/wall_clock_timer.h new file mode 100644 index 00000000000..8c955fe908f --- /dev/null +++ b/chromium/base/util/timer/wall_clock_timer.h @@ -0,0 +1,111 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef BASE_UTIL_TIMER_WALL_CLOCK_TIMER_H_ +#define BASE_UTIL_TIMER_WALL_CLOCK_TIMER_H_ + +#include "base/bind.h" +#include "base/callback.h" +#include "base/location.h" +#include "base/power_monitor/power_observer.h" +#include "base/time/default_clock.h" +#include "base/time/time.h" +#include "base/timer/timer.h" + +namespace base { +class Clock; +class TickClock; +} // namespace base + +namespace util { + +// WallClockTimer is based on OneShotTimer and provides a simple timer API +// which is mostly similar to OneShotTimer's API. The main difference is that +// WallClockTimer uses Time (which is system-dependent) to schedule tasks. +// WallClockTimer calls you back once the scheduled time has come. +// +// Comparison with OneShotTimer: WallClockTimer runs |user_task_| after |delay_| +// expires according to wall-clock time, while OneShotTimer runs |user_task_| after +// |delay_| expires according to TimeTicks which freezes when power suspends +// (desktop falls asleep). +// +// The API is not thread-safe. All methods must be called from the same +// sequence (not necessarily the construction sequence), except for the +// destructor. +// - The destructor may be called from any sequence when the timer is not +// running and there is no scheduled task active. +class WallClockTimer : public base::PowerObserver { + public: + // Constructs a timer. Start() must be called later to start the timer. + // If |clock| is provided, it's used instead of + // base::DefaultClock::GetInstance() to calculate the timer's delay. + // If |tick_clock| is provided, it's used instead of base::TimeTicks::Now() to + // get base::TimeTicks when scheduling tasks. + WallClockTimer(); + WallClockTimer(const base::Clock* clock, const base::TickClock* tick_clock); + WallClockTimer(const WallClockTimer&) = delete; + WallClockTimer& operator=(const WallClockTimer&) = delete; + + ~WallClockTimer() override; + + // Starts the timer to run at the given |desired_run_time|. If the timer is + // already running, it will be replaced to call the given |user_task|. + virtual void Start(const base::Location& posted_from, + base::Time desired_run_time, + base::OnceClosure user_task); + + // Starts the timer to run at the given |desired_run_time|. If the timer is + // already running, it will be replaced to call a task formed from + // |receiver|->*|method|. + template <class Receiver> + void Start(const base::Location& posted_from, + base::Time desired_run_time, + Receiver* receiver, + void (Receiver::*method)()) { + Start(posted_from, desired_run_time, + base::BindOnce(method, base::Unretained(receiver))); + } + + // Stops the timer. It is a no-op if the timer is not running. + void Stop(); + + // Returns true if the timer is running. + bool IsRunning() const; + + // base::PowerObserver: + void OnResume() override; + + base::Time desired_run_time() const { return desired_run_time_; } + + private: + void AddObserver(); + + void RemoveObserver(); + + // Actually runs the scheduled task. + void RunUserTask(); + + // Returns the current time. + base::Time Now() const; + + bool observer_added_ = false; + + // Location in user code. + base::Location posted_from_; + + // The desired run time of |user_task_|. + base::Time desired_run_time_; + + base::OnceClosure user_task_; + + // Timer used to run |user_task_| while the system is awake. + base::OneShotTimer timer_; + + // The clock used to calculate the run time for scheduled tasks.
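A minimal usage sketch for the class declared above (illustrative only: DoDailyWork is a hypothetical callback, base::PowerMonitor is assumed to be initialized, and a task runner must be current for the underlying base::OneShotTimer):

  util::WallClockTimer timer;
  timer.Start(FROM_HERE, base::Time::Now() + base::TimeDelta::FromHours(1),
              base::BindOnce(&DoDailyWork));
  // If the machine suspends before the deadline, OnResume() re-arms the timer
  // with the remaining wall-clock delay, so the deadline is not pushed out by
  // the time spent asleep.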
+ const base::Clock* const clock_ = base::DefaultClock::GetInstance(); +}; + +} // namespace util + +#endif // BASE_UTIL_TIMER_WALL_CLOCK_TIMER_H_ diff --git a/chromium/base/util/timer/wall_clock_timer_unittest.cc b/chromium/base/util/timer/wall_clock_timer_unittest.cc new file mode 100644 index 00000000000..961f5c021f0 --- /dev/null +++ b/chromium/base/util/timer/wall_clock_timer_unittest.cc @@ -0,0 +1,219 @@ +// Copyright 2018 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/util/timer/wall_clock_timer.h" + +#include <memory> +#include <utility> + +#include "base/power_monitor/power_monitor.h" +#include "base/power_monitor/power_monitor_source.h" +#include "base/test/mock_callback.h" +#include "base/test/simple_test_clock.h" +#include "base/test/task_environment.h" +#include "testing/gmock/include/gmock/gmock.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace util { + +namespace { + +class StubPowerMonitorSource : public base::PowerMonitorSource { + public: + // Use this method to send a power resume event. + void Resume() { ProcessPowerEvent(RESUME_EVENT); } + + // Use this method to send a power suspend event. + void Suspend() { ProcessPowerEvent(SUSPEND_EVENT); } + + // base::PowerMonitorSource: + bool IsOnBatteryPowerImpl() override { return false; } +}; + +} // namespace + +class WallClockTimerTest : public ::testing::Test { + protected: + WallClockTimerTest() { + auto mock_power_monitor_source = std::make_unique<StubPowerMonitorSource>(); + mock_power_monitor_source_ = mock_power_monitor_source.get(); + base::PowerMonitor::Initialize(std::move(mock_power_monitor_source)); + } + + ~WallClockTimerTest() override { base::PowerMonitor::ShutdownForTesting(); } + + // Fast-forwards virtual time by |delta|. If |with_power| is true, both + // |clock_| and |task_environment_| time will be fast-forwarded. Otherwise, + // only |clock_| time will be changed to mimic the behavior when machine is + // suspended. + // Power event will be triggered if |with_power| is set to false. + void FastForwardBy(base::TimeDelta delay, bool with_power = true) { + if (!with_power) + mock_power_monitor_source_->Suspend(); + + clock_.SetNow(clock_.Now() + delay); + + if (with_power) { + task_environment_.FastForwardBy(delay); + } else { + mock_power_monitor_source_->Resume(); + task_environment_.RunUntilIdle(); + } + } + + // Owned by power_monitor_. Use this to simulate a power suspend and resume. + StubPowerMonitorSource* mock_power_monitor_source_ = nullptr; + base::test::SingleThreadTaskEnvironment task_environment_{ + base::test::TaskEnvironment::TimeSource::MOCK_TIME}; + base::SimpleTestClock clock_; +}; + +TEST_F(WallClockTimerTest, PowerResume) { + ::testing::StrictMock<base::MockOnceClosure> callback; + // Set up a WallClockTimer that will fire in one minute. + WallClockTimer wall_clock_timer(&clock_, + task_environment_.GetMockTickClock()); + constexpr auto delay = base::TimeDelta::FromMinutes(1); + const auto start_time = base::Time::Now(); + const auto run_time = start_time + delay; + clock_.SetNow(start_time); + wall_clock_timer.Start(FROM_HERE, run_time, callback.Get()); + EXPECT_EQ(wall_clock_timer.desired_run_time(), start_time + delay); + + // Pretend that time jumps forward 30 seconds while the machine is suspended. 
+ constexpr auto past_time = base::TimeDelta::FromSeconds(30); + FastForwardBy(past_time, /*with_power=*/false); + // Ensure that the timer has not yet fired. + ::testing::Mock::VerifyAndClearExpectations(&callback); + EXPECT_EQ(wall_clock_timer.desired_run_time(), start_time + delay); + + // Expect that the timer fires at the desired run time. + EXPECT_CALL(callback, Run()); + // Both Time::Now() and |task_environment_| MockTickClock::Now() + // go forward by (|delay| - |past_time|): + FastForwardBy(delay - past_time); + ::testing::Mock::VerifyAndClearExpectations(&callback); + EXPECT_FALSE(wall_clock_timer.IsRunning()); +} + +TEST_F(WallClockTimerTest, UseTimerTwiceInRow) { + ::testing::StrictMock<base::MockOnceClosure> first_callback; + ::testing::StrictMock<base::MockOnceClosure> second_callback; + const auto start_time = base::Time::Now(); + clock_.SetNow(start_time); + + // Set up a WallClockTimer that will invoke |first_callback| in one minute. + // Once it's done, it will invoke |second_callback| after another minute. + WallClockTimer wall_clock_timer(&clock_, + task_environment_.GetMockTickClock()); + constexpr auto delay = base::TimeDelta::FromMinutes(1); + wall_clock_timer.Start(FROM_HERE, clock_.Now() + delay, first_callback.Get()); + EXPECT_CALL(first_callback, Run()) + .WillOnce(::testing::InvokeWithoutArgs( + [this, &wall_clock_timer, &second_callback, delay]() { + wall_clock_timer.Start(FROM_HERE, clock_.Now() + delay, + second_callback.Get()); + })); + + FastForwardBy(delay); + ::testing::Mock::VerifyAndClearExpectations(&first_callback); + ::testing::Mock::VerifyAndClearExpectations(&second_callback); + + // When |wall_clock_timer| is used for the second time, it can still handle + // power suspension properly. + constexpr auto past_time = base::TimeDelta::FromSeconds(30); + FastForwardBy(past_time, /*with_power=*/false); + ::testing::Mock::VerifyAndClearExpectations(&second_callback); + + EXPECT_CALL(second_callback, Run()); + FastForwardBy(delay - past_time); + ::testing::Mock::VerifyAndClearExpectations(&second_callback); +} + +TEST_F(WallClockTimerTest, Stop) { + ::testing::StrictMock<base::MockOnceClosure> callback; + clock_.SetNow(base::Time::Now()); + + // Set up a WallClockTimer. + WallClockTimer wall_clock_timer(&clock_, + task_environment_.GetMockTickClock()); + constexpr auto delay = base::TimeDelta::FromMinutes(1); + wall_clock_timer.Start(FROM_HERE, clock_.Now() + delay, callback.Get()); + + // After 20 seconds, the timer is stopped. + constexpr auto past_time = base::TimeDelta::FromSeconds(20); + FastForwardBy(past_time); + EXPECT_TRUE(wall_clock_timer.IsRunning()); + wall_clock_timer.Stop(); + EXPECT_FALSE(wall_clock_timer.IsRunning()); + + // When power is suspended and resumed, the timer won't be resumed. + FastForwardBy(past_time, /*with_power=*/false); + EXPECT_FALSE(wall_clock_timer.IsRunning()); + + // The timer won't fire when the desired run time is reached. + FastForwardBy(delay - past_time * 2); + ::testing::Mock::VerifyAndClearExpectations(&callback); +} + +TEST_F(WallClockTimerTest, RestartRunningTimer) { + ::testing::StrictMock<base::MockOnceClosure> first_callback; + ::testing::StrictMock<base::MockOnceClosure> second_callback; + constexpr auto delay = base::TimeDelta::FromMinutes(1); + + // Set up a WallClockTimer that will invoke |first_callback| in one minute.
+ clock_.SetNow(base::Time::Now()); + WallClockTimer wall_clock_timer(&clock_, + task_environment_.GetMockTickClock()); + wall_clock_timer.Start(FROM_HERE, clock_.Now() + delay, first_callback.Get()); + + // After 30 seconds, replace the timer with |second_callback| with new one + // minute delay. + constexpr auto past_time = delay / 2; + FastForwardBy(past_time); + wall_clock_timer.Start(FROM_HERE, clock_.Now() + delay, + second_callback.Get()); + + // |first_callback| is due but it won't be called because it's replaced. + FastForwardBy(past_time); + ::testing::Mock::VerifyAndClearExpectations(&first_callback); + ::testing::Mock::VerifyAndClearExpectations(&second_callback); + + // Timer invokes the |second_callback|. + EXPECT_CALL(second_callback, Run()); + FastForwardBy(past_time); + ::testing::Mock::VerifyAndClearExpectations(&first_callback); + ::testing::Mock::VerifyAndClearExpectations(&second_callback); +} + +TEST_F(WallClockTimerTest, DoubleStop) { + ::testing::StrictMock<base::MockOnceClosure> callback; + clock_.SetNow(base::Time::Now()); + + // Set up a WallClockTimer. + WallClockTimer wall_clock_timer(&clock_, + task_environment_.GetMockTickClock()); + constexpr auto delay = base::TimeDelta::FromMinutes(1); + wall_clock_timer.Start(FROM_HERE, clock_.Now() + delay, callback.Get()); + + // After 15 seconds, timer is stopped. + constexpr auto past_time = delay / 4; + FastForwardBy(past_time); + EXPECT_TRUE(wall_clock_timer.IsRunning()); + wall_clock_timer.Stop(); + EXPECT_FALSE(wall_clock_timer.IsRunning()); + + // And timer is stopped again later. The second stop should be a no-op. + FastForwardBy(past_time); + EXPECT_FALSE(wall_clock_timer.IsRunning()); + wall_clock_timer.Stop(); + EXPECT_FALSE(wall_clock_timer.IsRunning()); + + // Timer won't fire after stop. + FastForwardBy(past_time, /*with_power=*/false); + FastForwardBy(delay - past_time * 3); + ::testing::Mock::VerifyAndClearExpectations(&callback); +} + +} // namespace util diff --git a/chromium/base/util/type_safety/id_type.h b/chromium/base/util/type_safety/id_type.h index 78748790c73..918e945b512 100644 --- a/chromium/base/util/type_safety/id_type.h +++ b/chromium/base/util/type_safety/id_type.h @@ -6,6 +6,7 @@ #define BASE_UTIL_TYPE_SAFETY_ID_TYPE_H_ #include <cstdint> +#include <type_traits> #include "base/util/type_safety/strong_alias.h" @@ -49,9 +50,10 @@ namespace util { template <typename TypeMarker, typename WrappedType, WrappedType kInvalidValue> class IdType : public StrongAlias<TypeMarker, WrappedType> { public: - static_assert(kInvalidValue <= 0, - "The invalid value should be negative or equal to zero to " - "avoid overflow issues."); + static_assert( + std::is_unsigned<WrappedType>::value || kInvalidValue <= 0, + "If signed, the invalid value should be negative or equal to zero to " + "avoid overflow issues."); using StrongAlias<TypeMarker, WrappedType>::StrongAlias; @@ -60,7 +62,6 @@ class IdType : public StrongAlias<TypeMarker, WrappedType> { class Generator { public: Generator() = default; - ~Generator() = default; // Generates the next unique ID. IdType GenerateNextId() { return FromUnsafeValue(next_id_++); } @@ -74,15 +75,19 @@ class IdType : public StrongAlias<TypeMarker, WrappedType> { }; // Default-construct in the null state. 
- IdType() : StrongAlias<TypeMarker, WrappedType>::StrongAlias(kInvalidValue) {} + constexpr IdType() + : StrongAlias<TypeMarker, WrappedType>::StrongAlias(kInvalidValue) {} - bool is_null() const { return this->value() == kInvalidValue; } + constexpr bool is_null() const { return this->value() == kInvalidValue; } + constexpr explicit operator bool() const { return !is_null(); } // TODO(mpawlowski) Replace these with constructor/value() getter. The // conversions are safe as long as they're explicit (which is taken care of by // StrongAlias). - static IdType FromUnsafeValue(WrappedType value) { return IdType(value); } - WrappedType GetUnsafeValue() const { return this->value(); } + constexpr static IdType FromUnsafeValue(WrappedType value) { + return IdType(value); + } + constexpr WrappedType GetUnsafeValue() const { return this->value(); } }; // Type aliases for convenience: diff --git a/chromium/base/util/type_safety/id_type_unittest.cc b/chromium/base/util/type_safety/id_type_unittest.cc index 04be50f2441..1e07bcf2748 100644 --- a/chromium/base/util/type_safety/id_type_unittest.cc +++ b/chromium/base/util/type_safety/id_type_unittest.cc @@ -40,6 +40,40 @@ TEST(IdType, GeneratorWithNonZeroInvalidValue) { EXPECT_EQ(test_id_generator.GenerateNextId(), TestId::FromUnsafeValue(i)); } +TEST(IdType, GeneratorWithBigUnsignedInvalidValue) { + using TestId = + IdType<class TestIdTag, uint32_t, std::numeric_limits<uint32_t>::max()>; + + TestId::Generator test_id_generator; + for (int i = 0; i < 10; i++) { + TestId id = test_id_generator.GenerateNextId(); + EXPECT_FALSE(id.is_null()); + EXPECT_EQ(id, TestId::FromUnsafeValue(i)); + } +} + +TEST(IdType, EnsureConstexpr) { + using TestId = IdType32<class TestTag>; + + // Test constructors. + static constexpr TestId kZero; + static constexpr auto kOne = TestId::FromUnsafeValue(1); + + // Test getting the underlying value. + static_assert(kZero.value() == 0, ""); + static_assert(kOne.value() == 1, ""); + static_assert(kZero.GetUnsafeValue() == 0, ""); + static_assert(kOne.GetUnsafeValue() == 1, ""); + + // Test is_null(). + static_assert(kZero.is_null(), ""); + static_assert(!kOne.is_null(), ""); + + // Test operator bool. 
+ static_assert(!kZero, ""); + static_assert(kOne, ""); +} + class IdTypeSpecificValueTest : public ::testing::TestWithParam<int> { protected: FooId test_id() { return FooId::FromUnsafeValue(GetParam()); } diff --git a/chromium/base/util/type_safety/strong_alias.h b/chromium/base/util/type_safety/strong_alias.h index 05038bf3e2d..9c49b777882 100644 --- a/chromium/base/util/type_safety/strong_alias.h +++ b/chromium/base/util/type_safety/strong_alias.h @@ -66,35 +66,33 @@ namespace util { template <typename TagType, typename UnderlyingType> class StrongAlias { public: - StrongAlias() = default; - explicit StrongAlias(const UnderlyingType& v) : value_(v) {} - explicit StrongAlias(UnderlyingType&& v) : value_(std::move(v)) {} - ~StrongAlias() = default; + constexpr StrongAlias() = default; + constexpr explicit StrongAlias(const UnderlyingType& v) : value_(v) {} + constexpr explicit StrongAlias(UnderlyingType&& v) : value_(std::move(v)) {} - StrongAlias(const StrongAlias& other) = default; - StrongAlias& operator=(const StrongAlias& other) = default; - StrongAlias(StrongAlias&& other) = default; - StrongAlias& operator=(StrongAlias&& other) = default; + constexpr UnderlyingType& value() & { return value_; } + constexpr const UnderlyingType& value() const& { return value_; } + constexpr UnderlyingType&& value() && { return std::move(value_); } + constexpr const UnderlyingType&& value() const&& { return std::move(value_); } - const UnderlyingType& value() const { return value_; } - explicit operator UnderlyingType() const { return value_; } + constexpr explicit operator UnderlyingType() const { return value_; } - bool operator==(const StrongAlias& other) const { + constexpr bool operator==(const StrongAlias& other) const { return value_ == other.value_; } - bool operator!=(const StrongAlias& other) const { + constexpr bool operator!=(const StrongAlias& other) const { return value_ != other.value_; } - bool operator<(const StrongAlias& other) const { + constexpr bool operator<(const StrongAlias& other) const { return value_ < other.value_; } - bool operator<=(const StrongAlias& other) const { + constexpr bool operator<=(const StrongAlias& other) const { return value_ <= other.value_; } - bool operator>(const StrongAlias& other) const { + constexpr bool operator>(const StrongAlias& other) const { return value_ > other.value_; } - bool operator>=(const StrongAlias& other) const { + constexpr bool operator>=(const StrongAlias& other) const { return value_ >= other.value_; } diff --git a/chromium/base/util/type_safety/strong_alias_unittest.cc b/chromium/base/util/type_safety/strong_alias_unittest.cc index d7b13cf8ca6..2c6eedae2b6 100644 --- a/chromium/base/util/type_safety/strong_alias_unittest.cc +++ b/chromium/base/util/type_safety/strong_alias_unittest.cc @@ -6,6 +6,7 @@ #include <cstdint> #include <map> +#include <memory> #include <sstream> #include <string> #include <type_traits> @@ -93,6 +94,11 @@ TYPED_TEST(StrongAliasTest, CanBeMoveConstructed) { FooAlias move_assigned; move_assigned = std::move(alias2); EXPECT_EQ(move_assigned, FooAlias(GetExampleValue<TypeParam>(2))); + + // Check that FooAlias is nothrow move constructible. This matters for + // performance when used in std::vectors. 
+  static_assert(std::is_nothrow_move_constructible<FooAlias>::value,
+                "Error: Alias is not nothrow move constructible");
 }
 
 TYPED_TEST(StrongAliasTest, CanBeConstructedFromMoveOnlyType) {
@@ -107,6 +113,25 @@ TYPED_TEST(StrongAliasTest, CanBeConstructedFromMoveOnlyType) {
   EXPECT_EQ(*b.value(), GetExampleValue<TypeParam>(1));
 }
 
+TYPED_TEST(StrongAliasTest, MutableValue) {
+  // Note, using a move-only unique_ptr to T:
+  using Ptr = std::unique_ptr<TypeParam>;
+  using FooAlias = StrongAlias<class FooTag, Ptr>;
+
+  FooAlias a(std::make_unique<TypeParam>());
+  FooAlias b(std::make_unique<TypeParam>());
+  EXPECT_TRUE(a.value());
+  EXPECT_TRUE(b.value());
+
+  // Check that both the mutable l-value and r-value overloads work and we can
+  // move out of the aliases.
+  { Ptr ignore(std::move(a).value()); }
+  { Ptr ignore(std::move(b.value())); }
+
+  EXPECT_FALSE(a.value());
+  EXPECT_FALSE(b.value());
+}
+
 TYPED_TEST(StrongAliasTest, CanBeWrittenToOutputStream) {
   using FooAlias = StrongAlias<class FooTag, TypeParam>;
 
@@ -124,6 +149,11 @@ TYPED_TEST(StrongAliasTest, IsDefaultConstructible) {
   using FooAlias = StrongAlias<class FooTag, TypeParam>;
   static_assert(std::is_default_constructible<FooAlias>::value,
                 "Should be possible to default-construct a StrongAlias.");
+  static_assert(
+      std::is_trivially_default_constructible<FooAlias>::value ==
+          std::is_trivially_default_constructible<TypeParam>::value,
+      "Should be possible to trivially default-construct a StrongAlias iff the "
+      "underlying type is trivially default constructible.");
 }
 
 TEST(StrongAliasTest, TrivialTypeAliasIsStandardLayout) {
@@ -263,4 +293,28 @@ TYPED_TEST(StrongAliasTest, CanDifferentiateOverloads) {
   EXPECT_EQ("BarAlias", Scope::Overload(BarAlias()));
 }
 
+TEST(StrongAliasTest, EnsureConstexpr) {
+  using FooAlias = StrongAlias<class FooTag, int>;
+
+  // Check constructors.
+  static constexpr FooAlias kZero{};
+  static constexpr FooAlias kOne(1);
+
+  // Check value().
+  static_assert(kZero.value() == 0, "");
+  static_assert(kOne.value() == 1, "");
+
+  // Check explicit conversions to underlying type.
+  static_assert(static_cast<int>(kZero) == 0, "");
+  static_assert(static_cast<int>(kOne) == 1, "");
+
+  // Check comparison operations.
+ static_assert(kZero == kZero, ""); + static_assert(kZero != kOne, ""); + static_assert(kZero < kOne, ""); + static_assert(kZero <= kOne, ""); + static_assert(kOne > kZero, ""); + static_assert(kOne >= kZero, ""); +} + } // namespace util diff --git a/chromium/base/values.cc b/chromium/base/values.cc index ecca445840a..4315dbc2068 100644 --- a/chromium/base/values.cc +++ b/chromium/base/values.cc @@ -13,10 +13,11 @@ #include <utility> #include "base/bit_cast.h" +#include "base/check_op.h" #include "base/containers/checked_iterators.h" #include "base/json/json_writer.h" -#include "base/logging.h" #include "base/memory/ptr_util.h" +#include "base/notreached.h" #include "base/stl_util.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" @@ -540,6 +541,10 @@ Value* Value::SetStringKey(StringPiece key, StringPiece value) { return SetKeyInternal(key, std::make_unique<Value>(value)); } +Value* Value::SetStringKey(StringPiece key, StringPiece16 value) { + return SetKeyInternal(key, std::make_unique<Value>(value)); +} + Value* Value::SetStringKey(StringPiece key, const char* value) { return SetKeyInternal(key, std::make_unique<Value>(value)); } @@ -548,10 +553,6 @@ Value* Value::SetStringKey(StringPiece key, std::string&& value) { return SetKeyInternal(key, std::make_unique<Value>(std::move(value))); } -Value* Value::SetStringKey(StringPiece key, StringPiece16 value) { - return SetKeyInternal(key, std::make_unique<Value>(value)); -} - bool Value::RemoveKey(StringPiece key) { CHECK(is_dict()); return dict_.erase(key) != 0; diff --git a/chromium/base/values.h b/chromium/base/values.h index f436cbbedfe..8d1ede3fd5e 100644 --- a/chromium/base/values.h +++ b/chromium/base/values.h @@ -305,11 +305,11 @@ class BASE_EXPORT Value { Value* SetIntKey(StringPiece key, int val); Value* SetDoubleKey(StringPiece key, double val); Value* SetStringKey(StringPiece key, StringPiece val); - // NOTE: These two overloads are provided as performance / code generation - // optimizations. + Value* SetStringKey(StringPiece key, StringPiece16 val); + // NOTE: The following two overloads are provided as performance / code + // generation optimizations. Value* SetStringKey(StringPiece key, const char* val); Value* SetStringKey(StringPiece key, std::string&& val); - Value* SetStringKey(StringPiece key, StringPiece16 val); // This attempts to remove the value associated with |key|. In case of // failure, e.g. 
the key does not exist, false is returned and the underlying diff --git a/chromium/base/version.cc b/chromium/base/version.cc index 2ee8793c03a..d084912ec8d 100644 --- a/chromium/base/version.cc +++ b/chromium/base/version.cc @@ -8,7 +8,7 @@ #include <algorithm> -#include "base/logging.h" +#include "base/check_op.h" #include "base/strings/string_number_conversions.h" #include "base/strings/string_split.h" #include "base/strings/string_util.h" diff --git a/chromium/base/vlog_unittest.cc b/chromium/base/vlog_unittest.cc index 3c3f49cb13b..2fc683a440f 100644 --- a/chromium/base/vlog_unittest.cc +++ b/chromium/base/vlog_unittest.cc @@ -4,7 +4,6 @@ #include "base/vlog.h" -#include "base/logging.h" #include "base/time/time.h" #include "testing/gtest/include/gtest/gtest.h" diff --git a/chromium/base/win/com_init_util.cc b/chromium/base/win/com_init_util.cc index 57c3c6f8d8c..4064a2a8899 100644 --- a/chromium/base/win/com_init_util.cc +++ b/chromium/base/win/com_init_util.cc @@ -7,6 +7,7 @@ #include <windows.h> #include <winternl.h> +#include "base/logging.h" namespace base { namespace win { diff --git a/chromium/base/win/embedded_i18n/generate_embedded_i18n.gni b/chromium/base/win/embedded_i18n/generate_embedded_i18n.gni index eaf9e790e16..79c70492562 100644 --- a/chromium/base/win/embedded_i18n/generate_embedded_i18n.gni +++ b/chromium/base/win/embedded_i18n/generate_embedded_i18n.gni @@ -51,7 +51,6 @@ import("//build/config/locales.gni") # Locales in |all_chrome_locales| are for pak file format. We need to convert them # to the xtb version. -# Todo(perrier): remove sr-Latn once it is in all_chrome_locales default_embedded_i18_locales = all_chrome_locales - [ "en-US", "he", @@ -60,7 +59,7 @@ default_embedded_i18_locales = all_chrome_locales - [ [ "iw", "no", - ] + [ "sr-Latn" ] + ] template("generate_embedded_i18n") { assert(defined(invoker.grd_files_info), diff --git a/chromium/base/win/embedded_i18n/language_selector.cc b/chromium/base/win/embedded_i18n/language_selector.cc index 762b3713e04..fde6a282b0f 100644 --- a/chromium/base/win/embedded_i18n/language_selector.cc +++ b/chromium/base/win/embedded_i18n/language_selector.cc @@ -12,7 +12,7 @@ #include <algorithm> #include <functional> -#include "base/logging.h" +#include "base/check_op.h" #include "base/stl_util.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" diff --git a/chromium/base/win/enum_variant.cc b/chromium/base/win/enum_variant.cc index afff1d57432..654cfc67c13 100644 --- a/chromium/base/win/enum_variant.cc +++ b/chromium/base/win/enum_variant.cc @@ -8,7 +8,7 @@ #include <algorithm> -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace win { diff --git a/chromium/base/win/event_trace_controller.cc b/chromium/base/win/event_trace_controller.cc index dfd9c7b74f8..770f742144f 100644 --- a/chromium/base/win/event_trace_controller.cc +++ b/chromium/base/win/event_trace_controller.cc @@ -4,7 +4,7 @@ // // Implementation of a Windows event trace controller class. 
#include "base/win/event_trace_controller.h" -#include "base/logging.h" +#include "base/check.h" namespace base { namespace win { diff --git a/chromium/base/win/hstring_reference.cc b/chromium/base/win/hstring_reference.cc index a31df77c7d8..f82d95cdbc5 100644 --- a/chromium/base/win/hstring_reference.cc +++ b/chromium/base/win/hstring_reference.cc @@ -8,7 +8,7 @@ #include <winstring.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/numerics/safe_conversions.h" #include "base/strings/char_traits.h" diff --git a/chromium/base/win/i18n.cc b/chromium/base/win/i18n.cc index 786ad0acc1e..ee6f64df33a 100644 --- a/chromium/base/win/i18n.cc +++ b/chromium/base/win/i18n.cc @@ -6,7 +6,7 @@ #include <windows.h> -#include "base/logging.h" +#include "base/check_op.h" #include "base/strings/string_split.h" #include "base/strings/string_util.h" diff --git a/chromium/base/win/iat_patch_function.cc b/chromium/base/win/iat_patch_function.cc index 07d32c54b64..c466f0f6660 100644 --- a/chromium/base/win/iat_patch_function.cc +++ b/chromium/base/win/iat_patch_function.cc @@ -4,7 +4,8 @@ #include "base/win/iat_patch_function.h" -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "base/win/patch_util.h" #include "base/win/pe_image.h" diff --git a/chromium/base/win/map.h b/chromium/base/win/map.h index 7b55c32a712..ef995ecf027 100644 --- a/chromium/base/win/map.h +++ b/chromium/base/win/map.h @@ -10,6 +10,7 @@ #include <map> +#include "base/logging.h" #include "base/stl_util.h" #include "base/win/vector.h" #include "base/win/winrt_foundation_helpers.h" diff --git a/chromium/base/win/patch_util.cc b/chromium/base/win/patch_util.cc index 0026ed4b0c4..46c1a574e0a 100644 --- a/chromium/base/win/patch_util.cc +++ b/chromium/base/win/patch_util.cc @@ -4,7 +4,7 @@ #include "base/win/patch_util.h" -#include "base/logging.h" +#include "base/notreached.h" namespace base { namespace win { diff --git a/chromium/base/win/registry.cc b/chromium/base/win/registry.cc index 2d8a77262ed..c317daf7067 100644 --- a/chromium/base/win/registry.cc +++ b/chromium/base/win/registry.cc @@ -12,7 +12,8 @@ #include <string> #include <utility> -#include "base/logging.h" +#include "base/check_op.h" +#include "base/notreached.h" #include "base/stl_util.h" #include "base/strings/string_util.h" #include "base/strings/string_util_win.h" diff --git a/chromium/base/win/resource_util.cc b/chromium/base/win/resource_util.cc index c538d6e295b..c011a5b1673 100644 --- a/chromium/base/win/resource_util.cc +++ b/chromium/base/win/resource_util.cc @@ -3,7 +3,7 @@ // found in the LICENSE file. 
#include "base/win/resource_util.h" -#include "base/logging.h" +#include "base/notreached.h" namespace base { namespace win { diff --git a/chromium/base/win/scoped_bstr.cc b/chromium/base/win/scoped_bstr.cc index 94d7d084c26..6f8297400d9 100644 --- a/chromium/base/win/scoped_bstr.cc +++ b/chromium/base/win/scoped_bstr.cc @@ -6,7 +6,7 @@ #include <stdint.h> -#include "base/logging.h" +#include "base/check.h" #include "base/numerics/safe_conversions.h" #include "base/process/memory.h" #include "base/strings/string_util.h" diff --git a/chromium/base/win/scoped_com_initializer.cc b/chromium/base/win/scoped_com_initializer.cc index a60440347e1..80c97495a50 100644 --- a/chromium/base/win/scoped_com_initializer.cc +++ b/chromium/base/win/scoped_com_initializer.cc @@ -4,7 +4,7 @@ #include "base/win/scoped_com_initializer.h" -#include "base/logging.h" +#include "base/check_op.h" namespace base { namespace win { diff --git a/chromium/base/win/scoped_handle.h b/chromium/base/win/scoped_handle.h index 9055e3eb17a..02c25336493 100644 --- a/chromium/base/win/scoped_handle.h +++ b/chromium/base/win/scoped_handle.h @@ -20,7 +20,7 @@ #define BASE_WIN_GET_CALLER _ReturnAddress() #elif defined(COMPILER_GCC) #define BASE_WIN_GET_CALLER \ - __builtin_extract_return_addr(\ __builtin_return_address(0)) + __builtin_extract_return_addr(__builtin_return_address(0)) #endif namespace base { diff --git a/chromium/base/win/scoped_hstring.cc b/chromium/base/win/scoped_hstring.cc index da4baacc175..5cea8b02461 100644 --- a/chromium/base/win/scoped_hstring.cc +++ b/chromium/base/win/scoped_hstring.cc @@ -6,6 +6,8 @@ #include <winstring.h> +#include "base/check.h" +#include "base/notreached.h" #include "base/numerics/safe_conversions.h" #include "base/process/memory.h" #include "base/strings/string_piece.h" diff --git a/chromium/base/win/scoped_winrt_initializer.cc b/chromium/base/win/scoped_winrt_initializer.cc index e167e5f4a56..c5947527c6a 100644 --- a/chromium/base/win/scoped_winrt_initializer.cc +++ b/chromium/base/win/scoped_winrt_initializer.cc @@ -4,7 +4,7 @@ #include "base/win/scoped_winrt_initializer.h" -#include "base/logging.h" +#include "base/check_op.h" #include "base/win/com_init_util.h" #include "base/win/core_winrt_util.h" #include "base/win/windows_version.h" diff --git a/chromium/base/win/shortcut.cc b/chromium/base/win/shortcut.cc index 7457645e3d9..55f3290cc96 100644 --- a/chromium/base/win/shortcut.cc +++ b/chromium/base/win/shortcut.cc @@ -33,15 +33,22 @@ using Microsoft::WRL::ComPtr; void InitializeShortcutInterfaces(const wchar_t* shortcut, ComPtr<IShellLink>* i_shell_link, ComPtr<IPersistFile>* i_persist_file) { - i_shell_link->Reset(); + // Reset in the inverse order of acquisition. 
i_persist_file->Reset(); + i_shell_link->Reset(); + + ComPtr<IShellLink> shell_link; if (FAILED(::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_INPROC_SERVER, - IID_PPV_ARGS(i_shell_link->GetAddressOf()))) || - FAILED(i_shell_link->CopyTo(i_persist_file->GetAddressOf())) || - (shortcut && FAILED((*i_persist_file)->Load(shortcut, STGM_READWRITE)))) { - i_shell_link->Reset(); - i_persist_file->Reset(); + IID_PPV_ARGS(&shell_link)))) { + return; } + ComPtr<IPersistFile> persist_file; + if (FAILED(shell_link.As(&persist_file))) + return; + if (shortcut && FAILED(persist_file->Load(shortcut, STGM_READWRITE))) + return; + i_shell_link->Swap(shell_link); + i_persist_file->Swap(persist_file); } } // namespace diff --git a/chromium/base/win/startup_information.cc b/chromium/base/win/startup_information.cc index a78508dcad5..c7be1ca48f6 100644 --- a/chromium/base/win/startup_information.cc +++ b/chromium/base/win/startup_information.cc @@ -4,7 +4,6 @@ #include "base/win/startup_information.h" -#include "base/logging.h" namespace base { namespace win { diff --git a/chromium/base/win/windows_types.h b/chromium/base/win/windows_types.h index 022c6a2782a..702531cbd22 100644 --- a/chromium/base/win/windows_types.h +++ b/chromium/base/win/windows_types.h @@ -68,6 +68,8 @@ typedef LONG_PTR SSIZE_T, *PSSIZE_T; typedef DWORD ACCESS_MASK; typedef ACCESS_MASK REGSAM; +typedef LONG NTSTATUS; + // As defined in guiddef.h. #ifndef _REFGUID_DEFINED #define _REFGUID_DEFINED diff --git a/chromium/base/win/windows_version.cc b/chromium/base/win/windows_version.cc index 0b5362819b5..0905602ee96 100644 --- a/chromium/base/win/windows_version.cc +++ b/chromium/base/win/windows_version.cc @@ -10,10 +10,11 @@ #include <tuple> #include <utility> +#include "base/check_op.h" #include "base/file_version_info_win.h" #include "base/files/file_path.h" -#include "base/logging.h" #include "base/no_destructor.h" +#include "base/notreached.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" #include "base/win/registry.h" @@ -198,19 +199,6 @@ Version OSInfo::Kernel32Version() const { return kernel32_version; } -Version OSInfo::UcrtVersion() const { - auto ucrt_version_info = FileVersionInfoWin::CreateFileVersionInfoWin( - FilePath(FILE_PATH_LITERAL("ucrtbase.dll"))); - if (ucrt_version_info) { - auto ucrt_components = ucrt_version_info->GetFileVersion().components(); - if (ucrt_components.size() == 4) { - return MajorMinorBuildToVersion(ucrt_components[0], ucrt_components[1], - ucrt_components[2]); - } - } - return Version(); -} - // Retrieve a version from kernel32. This is useful because when running in // compatibility mode for a down-level version of the OS, the file version of // kernel32 will still be the "real" version. diff --git a/chromium/base/win/windows_version.h b/chromium/base/win/windows_version.h index f7a25e78fe2..7b70619e27f 100644 --- a/chromium/base/win/windows_version.h +++ b/chromium/base/win/windows_version.h @@ -121,7 +121,6 @@ class BASE_EXPORT OSInfo { const Version& version() const { return version_; } Version Kernel32Version() const; - Version UcrtVersion() const; base::Version Kernel32BaseVersion() const; // The next two functions return arrays of values, [major, minor(, build)]. 
const VersionNumber& version_number() const { return version_number_; } diff --git a/chromium/base/win/windows_version_unittest.cc b/chromium/base/win/windows_version_unittest.cc index 06d4cfe0ef4..bd9d5048944 100644 --- a/chromium/base/win/windows_version_unittest.cc +++ b/chromium/base/win/windows_version_unittest.cc @@ -4,7 +4,7 @@ #include "base/win/windows_version.h" -#include "base/logging.h" +#include "base/check_op.h" #include "testing/gtest/include/gtest/gtest.h" namespace base { diff --git a/chromium/base/win/wrapped_window_proc.cc b/chromium/base/win/wrapped_window_proc.cc index 355f0e2f1f1..a27ed899952 100644 --- a/chromium/base/win/wrapped_window_proc.cc +++ b/chromium/base/win/wrapped_window_proc.cc @@ -5,7 +5,8 @@ #include "base/win/wrapped_window_proc.h" #include "base/atomicops.h" -#include "base/logging.h" +#include "base/check.h" +#include "base/notreached.h" #include "base/strings/string_util.h" namespace { |
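For illustration, the wall-clock timer tests earlier in this patch boil down to the following usage pattern. This is a minimal sketch, not part of the patch; it assumes the util::WallClockTimer API the tests exercise (a default constructor that uses the real wall clock, and a Start() overload taking FROM_HERE, a base::Time deadline, and a base::OnceClosure), plus an assumed header path of base/util/timer/wall_clock_timer.h.

#include "base/bind.h"
#include "base/location.h"
#include "base/time/time.h"
#include "base/util/timer/wall_clock_timer.h"

class ReportScheduler {
 public:
  // Schedules Report() to run one minute of wall-clock time from now. Because
  // the deadline is a base::Time rather than a TimeTicks delay, the callback
  // still fires on schedule even if the machine suspends and resumes.
  void ScheduleReport() {
    timer_.Start(FROM_HERE,
                 base::Time::Now() + base::TimeDelta::FromMinutes(1),
                 base::BindOnce(&ReportScheduler::Report,
                                base::Unretained(this)));
  }

 private:
  void Report() { /* ... */ }

  // Assumed default constructor using the default wall clock and tick clock.
  util::WallClockTimer timer_;
};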
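The id_type.h change relaxes the static_assert so that unsigned wrapped types may use any sentinel (such as the maximum uint32_t) as the invalid value, and makes construction, is_null(), and the new explicit operator bool constexpr. A sketch of how such an ID might be declared and checked, with illustrative names:

#include <cstdint>
#include <limits>

#include "base/util/type_safety/id_type.h"

// An ID whose null/invalid value is the maximum uint32_t rather than 0, which
// the relaxed static_assert now permits for unsigned wrapped types.
using FrameId = util::IdType<class FrameIdTag,
                             uint32_t,
                             std::numeric_limits<uint32_t>::max()>;

constexpr FrameId kNoFrame;  // Default-constructs to the invalid value.
static_assert(kNoFrame.is_null(), "");
static_assert(!kNoFrame, "a default-constructed ID is null");

void UseIds() {
  FrameId::Generator generator;
  FrameId id = generator.GenerateNextId();  // The first generated value is 0.
  if (id) {
    uint32_t raw = id.GetUnsafeValue();  // Only for external API boundaries.
    (void)raw;
  }
}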
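The strong_alias.h change makes construction, value(), and the comparison operators constexpr, and replaces the single const value() getter with ref-qualified overloads so the wrapped value can be moved out of an expiring alias. A sketch under those assumptions, with illustrative names:

#include <string>
#include <utility>

#include "base/util/type_safety/strong_alias.h"

using Name = util::StrongAlias<class NameTag, std::string>;
using Count = util::StrongAlias<class CountTag, int>;

// Aliases of literal types can now be compile-time constants.
constexpr Count kTen(10);
static_assert(kTen.value() == 10, "");
static_assert(kTen > Count(9), "");

std::string TakeName(Name name) {
  // The rvalue overload of value() lets callers move the wrapped string out
  // instead of copying it.
  return std::move(name).value();
}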
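values.h now groups the StringPiece16 overload of Value::SetStringKey with the other primary overloads, keeping only the const char* and std::string&& overloads flagged as codegen optimizations. A sketch of setting 8-bit and 16-bit string keys on a dictionary Value (base::ASCIIToUTF16 is used here purely for illustration):

#include "base/strings/utf_string_conversions.h"
#include "base/values.h"

base::Value MakeDict() {
  base::Value dict(base::Value::Type::DICTIONARY);
  dict.SetStringKey("title", "hello");                        // StringPiece overload.
  dict.SetStringKey("title16", base::ASCIIToUTF16("hello"));  // StringPiece16 overload.
  return dict;
}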
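The shortcut.cc change restructures InitializeShortcutInterfaces so the out-parameters are only touched after every step has succeeded: the interfaces are acquired into local ComPtrs and committed with Swap() at the end, rather than being reset on the failure path. The same commit-on-success pattern in isolation, with an illustrative function rather than the patched one:

#include <shlobj.h>
#include <wrl/client.h>

using Microsoft::WRL::ComPtr;

// Fills |i_shell_link| only if acquisition succeeds; otherwise it is left
// untouched. Mirrors the commit-on-success structure used by the patch.
bool AcquireShellLink(ComPtr<IShellLink>* i_shell_link) {
  ComPtr<IShellLink> shell_link;
  if (FAILED(::CoCreateInstance(CLSID_ShellLink, nullptr, CLSCTX_INPROC_SERVER,
                                IID_PPV_ARGS(&shell_link)))) {
    return false;
  }
  i_shell_link->Swap(shell_link);
  return true;
}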