summaryrefslogtreecommitdiff
path: root/chromium/base
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2015-10-13 13:24:50 +0200
committerAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2015-10-14 10:57:25 +0000
commitaf3d4809763ef308f08ced947a73b624729ac7ea (patch)
tree4402b911e30383f6c6dace1e8cf3b8e85355db3a /chromium/base
parent0e8ff63a407fe323e215bb1a2c423c09a4747c8a (diff)
downloadqtwebengine-chromium-af3d4809763ef308f08ced947a73b624729ac7ea.tar.gz
BASELINE: Update Chromium to 47.0.2526.14
Also adding in sources needed for spellchecking. Change-Id: Idd44170fa1616f26315188970a8d5ba7d472b18a Reviewed-by: Michael Brüning <michael.bruning@theqtcompany.com>
Diffstat (limited to 'chromium/base')
-rw-r--r--chromium/base/BUILD.gn191
-rw-r--r--chromium/base/DEPS1
-rw-r--r--chromium/base/OWNERS5
-rw-r--r--chromium/base/allocator/BUILD.gn92
-rw-r--r--chromium/base/allocator/README11
-rw-r--r--chromium/base/allocator/allocator.gyp118
-rw-r--r--chromium/base/allocator/allocator_shim_win.cc3
-rw-r--r--chromium/base/allocator/type_profiler.cc63
-rw-r--r--chromium/base/allocator/type_profiler.h40
-rw-r--r--chromium/base/allocator/type_profiler_control.cc38
-rw-r--r--chromium/base/allocator/type_profiler_control.h31
-rw-r--r--chromium/base/allocator/type_profiler_map_unittest.cc99
-rw-r--r--chromium/base/allocator/type_profiler_tcmalloc.cc37
-rw-r--r--chromium/base/allocator/type_profiler_tcmalloc.h29
-rw-r--r--chromium/base/allocator/type_profiler_unittest.cc189
-rw-r--r--chromium/base/android/animation_frame_time_histogram.cc6
-rw-r--r--chromium/base/android/apk_assets.cc2
-rw-r--r--chromium/base/android/application_status_listener.cc2
-rw-r--r--chromium/base/android/build_info.h3
-rw-r--r--chromium/base/android/command_line_android.cc32
-rw-r--r--chromium/base/android/cpu_features.cc4
-rw-r--r--chromium/base/android/field_trial_list.cc14
-rw-r--r--chromium/base/android/important_file_writer_android.cc6
-rw-r--r--chromium/base/android/library_loader/library_loader_hooks.cc24
-rw-r--r--chromium/base/android/library_loader/library_prefetcher.cc13
-rw-r--r--chromium/base/android/linker/BUILD.gn13
-rw-r--r--chromium/base/android/linker/android_dlext.h77
-rw-r--r--chromium/base/android/memory_pressure_listener_android.cc5
-rw-r--r--chromium/base/android/path_service_android.cc5
-rw-r--r--chromium/base/android/path_utils_unittest.cc23
-rw-r--r--chromium/base/android/record_histogram.cc88
-rw-r--r--chromium/base/android/record_user_action.cc4
-rw-r--r--chromium/base/android/scoped_java_ref.cc9
-rw-r--r--chromium/base/android/scoped_java_ref.h60
-rw-r--r--chromium/base/android/trace_event_binding.cc64
-rw-r--r--chromium/base/async_socket_io_handler.h110
-rw-r--r--chromium/base/async_socket_io_handler_posix.cc98
-rw-r--r--chromium/base/async_socket_io_handler_unittest.cc171
-rw-r--r--chromium/base/async_socket_io_handler_win.cc77
-rw-r--r--chromium/base/base.gyp101
-rw-r--r--chromium/base/base.gypi36
-rw-r--r--chromium/base/base.isolate40
-rw-r--r--chromium/base/base_nacl.gyp2
-rw-r--r--chromium/base/base_paths_win.h4
-rw-r--r--chromium/base/base_switches.cc10
-rw-r--r--chromium/base/base_switches.h1
-rw-r--r--chromium/base/base_unittests.isolate2
-rw-r--r--chromium/base/base_unittests_apk.isolate24
-rw-r--r--chromium/base/basictypes.h3
-rw-r--r--chromium/base/bits.h6
-rw-r--r--chromium/base/bits_unittest.cc15
-rw-r--r--chromium/base/callback_internal.h3
-rw-r--r--chromium/base/command_line.cc11
-rw-r--r--chromium/base/command_line_unittest.cc3
-rw-r--r--chromium/base/compiler_specific.h24
-rw-r--r--chromium/base/containers/scoped_ptr_map.h6
-rw-r--r--chromium/base/containers/scoped_ptr_map_unittest.cc25
-rw-r--r--chromium/base/cpu.cc16
-rw-r--r--chromium/base/cpu.h3
-rw-r--r--chromium/base/cpu_unittest.cc87
-rw-r--r--chromium/base/debug/crash_logging.cc2
-rw-r--r--chromium/base/debug/debugger_posix.cc7
-rw-r--r--chromium/base/debug/gdi_debug_util_win.cc6
-rw-r--r--chromium/base/debug/proc_maps_linux.cc4
-rw-r--r--chromium/base/debug/stack_trace.h17
-rw-r--r--chromium/base/debug/stack_trace_posix.cc44
-rw-r--r--chromium/base/debug/stack_trace_win.cc128
-rw-r--r--chromium/base/debug/task_annotator.cc29
-rw-r--r--chromium/base/debug/task_annotator.h14
-rw-r--r--chromium/base/debug/task_annotator_unittest.cc3
-rw-r--r--chromium/base/debug_message.cc17
-rw-r--r--chromium/base/environment.cc4
-rw-r--r--chromium/base/feature_list.cc170
-rw-r--r--chromium/base/feature_list.h209
-rw-r--r--chromium/base/feature_list_unittest.cc310
-rw-r--r--chromium/base/file_descriptor_posix.h13
-rw-r--r--chromium/base/file_version_info.h23
-rw-r--r--chromium/base/files/OWNERS3
-rw-r--r--chromium/base/files/dir_reader_posix_unittest.cc6
-rw-r--r--chromium/base/files/file.h53
-rw-r--r--chromium/base/files/file_enumerator_win.cc3
-rw-r--r--chromium/base/files/file_path.cc12
-rw-r--r--chromium/base/files/file_path_unittest.cc8
-rw-r--r--chromium/base/files/file_path_watcher_linux.cc31
-rw-r--r--chromium/base/files/file_path_watcher_win.cc4
-rw-r--r--chromium/base/files/file_posix.cc43
-rw-r--r--chromium/base/files/file_proxy_unittest.cc8
-rw-r--r--chromium/base/files/file_tracing.h2
-rw-r--r--chromium/base/files/file_unittest.cc71
-rw-r--r--chromium/base/files/file_util_posix.cc12
-rw-r--r--chromium/base/files/file_util_unittest.cc15
-rw-r--r--chromium/base/files/important_file_writer.cc57
-rw-r--r--chromium/base/files/important_file_writer.h26
-rw-r--r--chromium/base/files/important_file_writer_unittest.cc10
-rw-r--r--chromium/base/guid_unittest.cc4
-rw-r--r--chromium/base/i18n/break_iterator.cc8
-rw-r--r--chromium/base/i18n/break_iterator.h27
-rw-r--r--chromium/base/i18n/break_iterator_unittest.cc85
-rw-r--r--chromium/base/i18n/case_conversion.h4
-rw-r--r--chromium/base/i18n/case_conversion_unittest.cc7
-rw-r--r--chromium/base/i18n/message_formatter.cc141
-rw-r--r--chromium/base/i18n/message_formatter.h111
-rw-r--r--chromium/base/i18n/message_formatter_unittest.cc180
-rw-r--r--chromium/base/i18n/number_formatting_unittest.cc5
-rw-r--r--chromium/base/i18n/rtl.cc42
-rw-r--r--chromium/base/i18n/rtl.h6
-rw-r--r--chromium/base/i18n/rtl_unittest.cc26
-rw-r--r--chromium/base/i18n/time_formatting_unittest.cc6
-rw-r--r--chromium/base/id_map.h14
-rw-r--r--chromium/base/id_map_unittest.cc12
-rw-r--r--chromium/base/ios/ios_util.h7
-rw-r--r--chromium/base/ios/ios_util.mm7
-rw-r--r--chromium/base/json/json_parser.h21
-rw-r--r--chromium/base/json/string_escape.cc8
-rw-r--r--chromium/base/json/string_escape_unittest.cc4
-rw-r--r--chromium/base/linux_util.cc8
-rw-r--r--chromium/base/logging.cc138
-rw-r--r--chromium/base/logging.h72
-rw-r--r--chromium/base/logging_unittest.cc24
-rw-r--r--chromium/base/logging_win.cc4
-rw-r--r--chromium/base/logging_win.h4
-rw-r--r--chromium/base/mac/foundation_util.h8
-rw-r--r--chromium/base/mac/foundation_util.mm14
-rw-r--r--chromium/base/mac/foundation_util_unittest.mm12
-rw-r--r--chromium/base/mac/mac_util.h5
-rw-r--r--chromium/base/mac/mac_util.mm14
-rw-r--r--chromium/base/mac/mac_util_unittest.mm44
-rw-r--r--chromium/base/mac/scoped_nsexception_enabler.h54
-rw-r--r--chromium/base/mac/scoped_nsexception_enabler.mm63
-rw-r--r--chromium/base/mac/sdk_forward_declarations.h6
-rw-r--r--chromium/base/mac/sdk_forward_declarations.mm5
-rw-r--r--chromium/base/macros.h23
-rw-r--r--chromium/base/memory/BUILD.gn7
-rw-r--r--chromium/base/memory/discardable_memory.h13
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc18
-rw-r--r--chromium/base/memory/discardable_shared_memory.h3
-rw-r--r--chromium/base/memory/discardable_shared_memory_unittest.cc11
-rw-r--r--chromium/base/memory/memory_pressure_listener.cc30
-rw-r--r--chromium/base/memory/memory_pressure_listener.h10
-rw-r--r--chromium/base/memory/memory_pressure_listener_unittest.cc78
-rw-r--r--chromium/base/memory/memory_pressure_monitor_chromeos.cc5
-rw-r--r--chromium/base/memory/memory_pressure_monitor_chromeos.h3
-rw-r--r--chromium/base/memory/memory_pressure_monitor_win.h2
-rw-r--r--chromium/base/memory/scoped_ptr.h25
-rw-r--r--chromium/base/memory/scoped_ptr_unittest.cc23
-rw-r--r--chromium/base/memory/scoped_vector_unittest.cc15
-rw-r--r--chromium/base/memory/shared_memory.h38
-rw-r--r--chromium/base/memory/shared_memory_handle.h106
-rw-r--r--chromium/base/memory/shared_memory_handle_mac.cc169
-rw-r--r--chromium/base/memory/shared_memory_handle_win.cc68
-rw-r--r--chromium/base/memory/shared_memory_mac.cc247
-rw-r--r--chromium/base/memory/shared_memory_mac_unittest.cc323
-rw-r--r--chromium/base/memory/shared_memory_posix.cc18
-rw-r--r--chromium/base/memory/shared_memory_unittest.cc101
-rw-r--r--chromium/base/memory/shared_memory_win.cc40
-rw-r--r--chromium/base/memory/singleton.h54
-rw-r--r--chromium/base/memory/singleton_unittest.cc11
-rw-r--r--chromium/base/memory/weak_ptr.h1
-rw-r--r--chromium/base/message_loop/incoming_task_queue.cc2
-rw-r--r--chromium/base/message_loop/message_loop.cc30
-rw-r--r--chromium/base/message_loop/message_loop.h18
-rw-r--r--chromium/base/message_loop/message_loop_task_runner_unittest.cc6
-rw-r--r--chromium/base/message_loop/message_loop_unittest.cc8
-rw-r--r--chromium/base/message_loop/message_pump_android.cc6
-rw-r--r--chromium/base/message_loop/message_pump_glib.cc4
-rw-r--r--chromium/base/message_loop/message_pump_libevent.cc32
-rw-r--r--chromium/base/message_loop/message_pump_libevent.h5
-rw-r--r--chromium/base/message_loop/message_pump_mac.h2
-rw-r--r--chromium/base/message_loop/message_pump_perftest.cc7
-rw-r--r--chromium/base/message_loop/message_pump_win.cc36
-rw-r--r--chromium/base/message_loop/message_pump_win.h2
-rw-r--r--chromium/base/metrics/BUILD.gn4
-rw-r--r--chromium/base/metrics/bucket_ranges.h1
-rw-r--r--chromium/base/metrics/field_trial.cc29
-rw-r--r--chromium/base/metrics/field_trial.h24
-rw-r--r--chromium/base/metrics/field_trial_unittest.cc24
-rw-r--r--chromium/base/metrics/histogram.cc12
-rw-r--r--chromium/base/metrics/histogram.h2
-rw-r--r--chromium/base/metrics/histogram_base.cc11
-rw-r--r--chromium/base/metrics/histogram_base.h16
-rw-r--r--chromium/base/metrics/histogram_macros.h3
-rw-r--r--chromium/base/metrics/histogram_unittest.cc24
-rw-r--r--chromium/base/metrics/sparse_histogram.cc16
-rw-r--r--chromium/base/metrics/sparse_histogram.h1
-rw-r--r--chromium/base/metrics/sparse_histogram_unittest.cc19
-rw-r--r--chromium/base/metrics/statistics_recorder.cc68
-rw-r--r--chromium/base/metrics/statistics_recorder.h26
-rw-r--r--chromium/base/metrics/statistics_recorder_unittest.cc176
-rw-r--r--chromium/base/nix/xdg_util.cc16
-rw-r--r--chromium/base/nix/xdg_util.h5
-rw-r--r--chromium/base/nix/xdg_util_unittest.cc13
-rw-r--r--chromium/base/numerics/safe_conversions.h18
-rw-r--r--chromium/base/numerics/safe_conversions_impl.h62
-rw-r--r--chromium/base/numerics/safe_math.h21
-rw-r--r--chromium/base/numerics/safe_math_impl.h41
-rw-r--r--chromium/base/numerics/safe_numerics_unittest.cc164
-rw-r--r--chromium/base/path_service.cc81
-rw-r--r--chromium/base/path_service.h17
-rw-r--r--chromium/base/path_service_unittest.cc121
-rw-r--r--chromium/base/pickle.cc63
-rw-r--r--chromium/base/pickle.h25
-rw-r--r--chromium/base/pickle_unittest.cc96
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source.h2
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_android.cc6
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_win.cc1
-rw-r--r--chromium/base/prefs/OWNERS1
-rw-r--r--chromium/base/prefs/json_pref_store.cc37
-rw-r--r--chromium/base/prefs/json_pref_store.h1
-rw-r--r--chromium/base/prefs/persistent_pref_store.h3
-rw-r--r--chromium/base/prefs/pref_value_store.h8
-rw-r--r--chromium/base/process/BUILD.gn7
-rw-r--r--chromium/base/process/internal_linux.cc5
-rw-r--r--chromium/base/process/kill.h1
-rw-r--r--chromium/base/process/kill_win.cc2
-rw-r--r--chromium/base/process/launch.h21
-rw-r--r--chromium/base/process/launch_mac.cc17
-rw-r--r--chromium/base/process/launch_posix.cc38
-rw-r--r--chromium/base/process/launch_win.cc188
-rw-r--r--chromium/base/process/memory_mac.mm5
-rw-r--r--chromium/base/process/memory_unittest.cc78
-rw-r--r--chromium/base/process/memory_win.cc30
-rw-r--r--chromium/base/process/port_provider_mac.h29
-rw-r--r--chromium/base/process/process.h20
-rw-r--r--chromium/base/process/process_info_linux.cc2
-rw-r--r--chromium/base/process/process_info_mac.cc2
-rw-r--r--chromium/base/process/process_iterator_mac.cc3
-rw-r--r--chromium/base/process/process_linux.cc4
-rw-r--r--chromium/base/process/process_mac.cc128
-rw-r--r--chromium/base/process/process_metrics.h84
-rw-r--r--chromium/base/process/process_metrics_ios.cc12
-rw-r--r--chromium/base/process/process_metrics_linux.cc13
-rw-r--r--chromium/base/process/process_metrics_mac.cc37
-rw-r--r--chromium/base/process/process_metrics_unittest.cc10
-rw-r--r--chromium/base/process/process_metrics_win.cc37
-rw-r--r--chromium/base/process/process_posix.cc14
-rw-r--r--chromium/base/process/process_unittest.cc32
-rw-r--r--chromium/base/process/process_util_unittest.cc19
-rw-r--r--chromium/base/process/process_win.cc20
-rw-r--r--chromium/base/profiler/native_stack_sampler_posix.cc (renamed from chromium/base/profiler/stack_sampling_profiler_posix.cc)0
-rw-r--r--chromium/base/profiler/native_stack_sampler_win.cc (renamed from chromium/base/profiler/stack_sampling_profiler_win.cc)69
-rw-r--r--chromium/base/profiler/stack_sampling_profiler.cc219
-rw-r--r--chromium/base/profiler/stack_sampling_profiler.h80
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_unittest.cc228
-rw-r--r--chromium/base/profiler/win32_stack_frame_unwinder.cc212
-rw-r--r--chromium/base/profiler/win32_stack_frame_unwinder.h83
-rw-r--r--chromium/base/profiler/win32_stack_frame_unwinder_unittest.cc231
-rw-r--r--chromium/base/security_unittest.cc194
-rw-r--r--chromium/base/strings/string_number_conversions.cc109
-rw-r--r--chromium/base/strings/string_number_conversions.h6
-rw-r--r--chromium/base/strings/string_piece.cc13
-rw-r--r--chromium/base/strings/string_piece.h23
-rw-r--r--chromium/base/strings/string_split.cc173
-rw-r--r--chromium/base/strings/string_split.h66
-rw-r--r--chromium/base/strings/string_split_unittest.cc104
-rw-r--r--chromium/base/strings/string_util.cc301
-rw-r--r--chromium/base/strings/string_util.h230
-rw-r--r--chromium/base/strings/string_util_posix.h8
-rw-r--r--chromium/base/strings/string_util_unittest.cc192
-rw-r--r--chromium/base/strings/string_util_win.h8
-rw-r--r--chromium/base/strings/sys_string_conversions_unittest.cc6
-rw-r--r--chromium/base/strings/utf_string_conversion_utils.h1
-rw-r--r--chromium/base/strings/utf_string_conversions.cc22
-rw-r--r--chromium/base/strings/utf_string_conversions.h10
-rw-r--r--chromium/base/sync_socket.h6
-rw-r--r--chromium/base/sync_socket_unittest.cc9
-rw-r--r--chromium/base/sync_socket_win.cc6
-rw-r--r--chromium/base/synchronization/condition_variable.h3
-rw-r--r--chromium/base/synchronization/condition_variable_posix.cc25
-rw-r--r--chromium/base/synchronization/condition_variable_win.cc3
-rw-r--r--chromium/base/synchronization/lock.cc7
-rw-r--r--chromium/base/synchronization/lock.h9
-rw-r--r--chromium/base/synchronization/waitable_event_watcher_win.cc2
-rw-r--r--chromium/base/synchronization/waitable_event_win.cc2
-rw-r--r--chromium/base/sys_info.cc2
-rw-r--r--chromium/base/sys_info.h9
-rw-r--r--chromium/base/sys_info_android.cc34
-rw-r--r--chromium/base/third_party/dynamic_annotations/BUILD.gn2
-rw-r--r--chromium/base/third_party/superfasthash/OWNERS1
-rw-r--r--chromium/base/thread_task_runner_handle.cc2
-rw-r--r--chromium/base/threading/platform_thread.h52
-rw-r--r--chromium/base/threading/platform_thread_android.cc18
-rw-r--r--chromium/base/threading/platform_thread_freebsd.cc10
-rw-r--r--chromium/base/threading/platform_thread_internal_posix.h20
-rw-r--r--chromium/base/threading/platform_thread_linux.cc12
-rw-r--r--chromium/base/threading/platform_thread_mac.mm8
-rw-r--r--chromium/base/threading/platform_thread_posix.cc114
-rw-r--r--chromium/base/threading/platform_thread_unittest.cc193
-rw-r--r--chromium/base/threading/platform_thread_win.cc47
-rw-r--r--chromium/base/threading/post_task_and_reply_impl.h2
-rw-r--r--chromium/base/threading/sequenced_worker_pool.cc19
-rw-r--r--chromium/base/threading/sequenced_worker_pool_unittest.cc12
-rw-r--r--chromium/base/threading/thread.cc61
-rw-r--r--chromium/base/threading/thread.h35
-rw-r--r--chromium/base/threading/thread_checker.h12
-rw-r--r--chromium/base/threading/thread_id_name_manager.h5
-rw-r--r--chromium/base/threading/thread_id_name_manager_unittest.cc12
-rw-r--r--chromium/base/threading/thread_local_storage.cc6
-rw-r--r--chromium/base/threading/thread_local_storage.h7
-rw-r--r--chromium/base/threading/thread_restrictions.cc4
-rw-r--r--chromium/base/threading/thread_restrictions.h7
-rw-r--r--chromium/base/threading/thread_unittest.cc51
-rw-r--r--chromium/base/threading/worker_pool.cc1
-rw-r--r--chromium/base/threading/worker_pool.h17
-rw-r--r--chromium/base/threading/worker_pool_posix.cc195
-rw-r--r--chromium/base/threading/worker_pool_posix.h58
-rw-r--r--chromium/base/threading/worker_pool_posix_unittest.cc180
-rw-r--r--chromium/base/threading/worker_pool_win.cc6
-rw-r--r--chromium/base/time/time.cc14
-rw-r--r--chromium/base/time/time.h40
-rw-r--r--chromium/base/time/time_unittest.cc130
-rw-r--r--chromium/base/time/time_win.cc56
-rw-r--r--chromium/base/timer/timer.h49
-rw-r--r--chromium/base/timer/timer_unittest.cc42
-rw-r--r--chromium/base/tools_sanity_unittest.cc15
-rw-r--r--chromium/base/trace_event/BUILD.gn13
-rw-r--r--chromium/base/trace_event/OWNERS1
-rw-r--r--chromium/base/trace_event/java_heap_dump_provider_android.cc3
-rw-r--r--chromium/base/trace_event/java_heap_dump_provider_android.h3
-rw-r--r--chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc3
-rw-r--r--chromium/base/trace_event/malloc_dump_provider.cc5
-rw-r--r--chromium/base/trace_event/malloc_dump_provider.h3
-rw-r--r--chromium/base/trace_event/memory_allocator_dump.cc2
-rw-r--r--chromium/base/trace_event/memory_allocator_dump.h2
-rw-r--r--chromium/base/trace_event/memory_allocator_dump_unittest.cc16
-rw-r--r--chromium/base/trace_event/memory_dump_manager.cc258
-rw-r--r--chromium/base/trace_event/memory_dump_manager.h91
-rw-r--r--chromium/base/trace_event/memory_dump_manager_unittest.cc638
-rw-r--r--chromium/base/trace_event/memory_dump_provider.h16
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.cc37
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.h30
-rw-r--r--chromium/base/trace_event/memory_profiler_allocation_context.cc91
-rw-r--r--chromium/base/trace_event/memory_profiler_allocation_context.h119
-rw-r--r--chromium/base/trace_event/memory_profiler_allocation_context_unittest.cc210
-rw-r--r--chromium/base/trace_event/process_memory_dump.cc7
-rw-r--r--chromium/base/trace_event/process_memory_dump.h8
-rw-r--r--chromium/base/trace_event/process_memory_maps_dump_provider.cc11
-rw-r--r--chromium/base/trace_event/process_memory_maps_dump_provider.h3
-rw-r--r--chromium/base/trace_event/process_memory_maps_dump_provider_unittest.cc9
-rw-r--r--chromium/base/trace_event/process_memory_totals_dump_provider.cc3
-rw-r--r--chromium/base/trace_event/process_memory_totals_dump_provider.h3
-rw-r--r--chromium/base/trace_event/process_memory_totals_dump_provider_unittest.cc5
-rw-r--r--chromium/base/trace_event/trace_buffer.cc396
-rw-r--r--chromium/base/trace_event/trace_buffer.h130
-rw-r--r--chromium/base/trace_event/trace_config.cc110
-rw-r--r--chromium/base/trace_event/trace_config.h37
-rw-r--r--chromium/base/trace_event/trace_config_memory_test_util.h76
-rw-r--r--chromium/base/trace_event/trace_config_unittest.cc37
-rw-r--r--chromium/base/trace_event/trace_event.gypi13
-rw-r--r--chromium/base/trace_event/trace_event.h1108
-rw-r--r--chromium/base/trace_event/trace_event_android.cc2
-rw-r--r--chromium/base/trace_event/trace_event_argument.cc24
-rw-r--r--chromium/base/trace_event/trace_event_argument_unittest.cc13
-rw-r--r--chromium/base/trace_event/trace_event_common.h1038
-rw-r--r--chromium/base/trace_event/trace_event_etw_export_win.cc127
-rw-r--r--chromium/base/trace_event/trace_event_etw_export_win.h17
-rw-r--r--chromium/base/trace_event/trace_event_impl.cc2198
-rw-r--r--chromium/base/trace_event/trace_event_impl.h576
-rw-r--r--chromium/base/trace_event/trace_event_memory.h6
-rw-r--r--chromium/base/trace_event/trace_event_memory_overhead.cc22
-rw-r--r--chromium/base/trace_event/trace_event_memory_overhead.h3
-rw-r--r--chromium/base/trace_event/trace_event_synthetic_delay.cc43
-rw-r--r--chromium/base/trace_event/trace_event_system_stats_monitor.h6
-rw-r--r--chromium/base/trace_event/trace_event_unittest.cc15
-rw-r--r--chromium/base/trace_event/trace_event_win.h4
-rw-r--r--chromium/base/trace_event/trace_log.cc1728
-rw-r--r--chromium/base/trace_event/trace_log.h494
-rw-r--r--chromium/base/trace_event/trace_log_constants.cc (renamed from chromium/base/trace_event/trace_event_impl_constants.cc)2
-rw-r--r--chromium/base/trace_event/trace_sampling_thread.cc101
-rw-r--r--chromium/base/trace_event/trace_sampling_thread.h54
-rw-r--r--chromium/base/trace_event/winheap_dump_provider_win.cc22
-rw-r--r--chromium/base/trace_event/winheap_dump_provider_win.h7
-rw-r--r--chromium/base/trace_event/winheap_dump_provider_win_unittest.cc3
-rw-r--r--chromium/base/tracked_objects.cc23
-rw-r--r--chromium/base/tracked_objects.h5
-rw-r--r--chromium/base/values.cc25
-rw-r--r--chromium/base/values.h6
-rw-r--r--chromium/base/version.cc4
-rw-r--r--chromium/base/win/OWNERS1
-rw-r--r--chromium/base/win/event_trace_controller.cc3
-rw-r--r--chromium/base/win/event_trace_controller_unittest.cc16
-rw-r--r--chromium/base/win/message_window.cc1
-rw-r--r--chromium/base/win/object_watcher.cc78
-rw-r--r--chromium/base/win/object_watcher.h28
-rw-r--r--chromium/base/win/object_watcher_unittest.cc57
-rw-r--r--chromium/base/win/registry.cc2
-rw-r--r--chromium/base/win/scoped_comptr.h38
-rw-r--r--chromium/base/win/scoped_handle.h9
-rw-r--r--chromium/base/win/scoped_handle_unittest.cc32
-rw-r--r--chromium/base/win/shortcut.cc35
-rw-r--r--chromium/base/win/shortcut.h22
-rw-r--r--chromium/base/win/win_util.cc44
-rw-r--r--chromium/base/win/win_util.h7
392 files changed, 13755 insertions, 9462 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 39bc2c63025..2cb2f6dc586 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/compiler/compiler.gni")
import("//build/config/ui.gni")
import("//testing/test.gni")
@@ -64,8 +65,6 @@ component("base") {
sources = [
"allocator/allocator_extension.cc",
"allocator/allocator_extension.h",
- "allocator/type_profiler_control.cc",
- "allocator/type_profiler_control.h",
"android/animation_frame_time_histogram.cc",
"android/animation_frame_time_histogram.h",
"android/apk_assets.cc",
@@ -132,9 +131,6 @@ component("base") {
"android/thread_utils.h",
"android/trace_event_binding.cc",
"android/trace_event_binding.h",
- "async_socket_io_handler.h",
- "async_socket_io_handler_posix.cc",
- "async_socket_io_handler_win.cc",
"at_exit.cc",
"at_exit.h",
"atomic_ref_count.h",
@@ -186,6 +182,8 @@ component("base") {
"deferred_sequenced_task_runner.h",
"environment.cc",
"environment.h",
+ "feature_list.cc",
+ "feature_list.h",
"file_descriptor_posix.h",
"file_version_info.h",
"file_version_info_mac.h",
@@ -246,10 +244,14 @@ component("base") {
"hash.cc",
"hash.h",
"id_map.h",
+ "ios/crb_protocol_observers.h",
+ "ios/crb_protocol_observers.mm",
"ios/device_util.h",
"ios/device_util.mm",
"ios/ios_util.h",
"ios/ios_util.mm",
+ "ios/ns_error_util.h",
+ "ios/ns_error_util.mm",
"ios/scoped_critical_action.h",
"ios/scoped_critical_action.mm",
"ios/weak_nsobject.h",
@@ -306,8 +308,6 @@ component("base") {
"mac/scoped_mach_vm.h",
"mac/scoped_nsautorelease_pool.h",
"mac/scoped_nsautorelease_pool.mm",
- "mac/scoped_nsexception_enabler.h",
- "mac/scoped_nsexception_enabler.mm",
"mac/scoped_nsobject.h",
"mac/scoped_objc_class_swizzler.h",
"mac/scoped_objc_class_swizzler.mm",
@@ -392,14 +392,14 @@ component("base") {
"profiler/alternate_timer.h",
"profiler/native_stack_sampler.cc",
"profiler/native_stack_sampler.h",
+ "profiler/native_stack_sampler_posix.cc",
+ "profiler/native_stack_sampler_win.cc",
"profiler/scoped_profile.cc",
"profiler/scoped_profile.h",
"profiler/scoped_tracker.cc",
"profiler/scoped_tracker.h",
"profiler/stack_sampling_profiler.cc",
"profiler/stack_sampling_profiler.h",
- "profiler/stack_sampling_profiler_posix.cc",
- "profiler/stack_sampling_profiler_win.cc",
"profiler/tracked_time.cc",
"profiler/tracked_time.h",
"rand_util.cc",
@@ -640,12 +640,34 @@ component("base") {
"win/wrapped_window_proc.h",
]
+ if (is_ios) {
+ sources -= [
+ "files/file_path_watcher.cc",
+ "files/file_path_watcher.h",
+ "files/file_path_watcher_fsevents.cc",
+ "files/file_path_watcher_fsevents.h",
+ "files/file_path_watcher_kqueue.cc",
+ "files/file_path_watcher_kqueue.h",
+ "message_loop/message_pump_libevent.cc",
+ "message_loop/message_pump_libevent.h",
+
+ # These don't work and are unused on iOS.
+ "sync_socket.h",
+ "sync_socket_posix.cc",
+ ]
+ }
+
sources -= [
"sys_info_freebsd.cc",
"sys_info_openbsd.cc",
]
- configs += [ ":base_implementation" ]
+ data = []
+
+ configs += [
+ ":base_implementation",
+ "//build/config:precompiled_headers",
+ ]
deps = [
":base_static",
@@ -708,9 +730,6 @@ component("base") {
set_sources_assignment_filter(sources_assignment_filter)
sources -= [
- "allocator/type_profiler_control.cc",
- "allocator/type_profiler_control.h",
- "async_socket_io_handler_posix.cc",
"cpu.cc",
"files/file_enumerator_posix.cc",
"files/file_proxy.cc",
@@ -740,6 +759,11 @@ component("base") {
# Windows.
if (is_win) {
+ sources += [
+ "profiler/win32_stack_frame_unwinder.cc",
+ "profiler/win32_stack_frame_unwinder.h",
+ ]
+
sources -= [
"message_loop/message_pump_libevent.cc",
"strings/string16.cc",
@@ -749,6 +773,30 @@ component("base") {
"sha1_win.cc",
]
+ # Required for base/stack_trace_win.cc to symbolize correctly.
+ data += [ "$root_build_dir/dbghelp.dll" ]
+
+ if (is_component_build) {
+ # Copy the VS runtime DLLs into the isolate so that they don't have to be
+ # preinstalled on the target machine. The debug runtimes have a "d" at
+ # the end.
+ if (is_debug) {
+ vcrt_suffix = "d"
+ } else {
+ vcrt_suffix = ""
+ }
+
+ # These runtime files are copied to the output directory by the
+ # vs_toolchain script that runs as part of toolchain configuration.
+ data += [
+ "$root_out_dir/msvcp120${vcrt_suffix}.dll",
+ "$root_out_dir/msvcr120${vcrt_suffix}.dll",
+ ]
+ if (is_asan) {
+ data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/3.8.0/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+ }
+ }
+
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
@@ -773,11 +821,9 @@ component("base") {
"threading/platform_thread_internal_posix.cc",
]
- if (is_ios) {
- sources -= [
- "files/file_path_watcher_fsevents.cc",
- "files/file_path_watcher_fsevents.h",
- ]
+ if (is_asan) {
+ # TODO(GYP) hook up asan on Mac. GYP has this extra dylib:
+ #data += [ "$root_out_dir/libclang_rt.asan_osx_dynamic.dylib" ]
}
} else {
# Non-Mac.
@@ -791,6 +837,11 @@ component("base") {
# Linux.
if (is_linux) {
+ if (is_asan || is_lsan || is_msan || is_tsan) {
+ # For llvm-sanitizer.
+ data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ]
+ }
+
# TODO(brettw) this will need to be parameterized at some point.
linux_configs = []
if (use_glib) {
@@ -872,9 +923,19 @@ component("base") {
]
}
+ if (is_asan || is_lsan || is_msan || is_tsan) {
+ data += [ "//tools/valgrind/asan/" ]
+ if (is_win) {
+ data +=
+ [ "//third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer.exe" ]
+ } else {
+ data += [ "//third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer" ]
+ }
+ }
+
configs += [ "//build/config/compiler:wexit_time_destructors" ]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
@@ -893,7 +954,7 @@ source_set("base_static") {
]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
}
@@ -920,6 +981,8 @@ component("i18n") {
"i18n/icu_string_conversions.h",
"i18n/icu_util.cc",
"i18n/icu_util.h",
+ "i18n/message_formatter.cc",
+ "i18n/message_formatter.h",
"i18n/number_formatting.cc",
"i18n/number_formatting.h",
"i18n/rtl.cc",
@@ -939,14 +1002,16 @@ component("i18n") {
]
defines = [ "BASE_I18N_IMPLEMENTATION" ]
configs += [ "//build/config/compiler:wexit_time_destructors" ]
+ public_deps = [
+ "//third_party/icu",
+ ]
deps = [
":base",
"//base/third_party/dynamic_annotations",
- "//third_party/icu",
]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
@@ -954,7 +1019,7 @@ component("i18n") {
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
}
-if (is_win || (is_linux && !is_chromeos)) {
+if (is_ios || is_win || (is_linux && !is_chromeos)) {
# TODO(GYP): Figure out which of these work and are needed on other platforms.
test("base_perftests") {
sources = [
@@ -996,6 +1061,7 @@ if (is_win || (is_linux && !is_chromeos)) {
]
deps = [
":base",
+ "//build/config/sanitizers:deps",
"//third_party/icu:icuuc",
]
}
@@ -1006,6 +1072,7 @@ if (is_win || (is_linux && !is_chromeos)) {
]
deps = [
":base",
+ "//build/config/sanitizers:deps",
]
}
}
@@ -1013,23 +1080,18 @@ if (is_win || (is_linux && !is_chromeos)) {
component("prefs") {
sources = [
- "prefs/base_prefs_export.h",
"prefs/default_pref_store.cc",
"prefs/default_pref_store.h",
"prefs/json_pref_store.cc",
"prefs/json_pref_store.h",
"prefs/overlay_user_pref_store.cc",
"prefs/overlay_user_pref_store.h",
- "prefs/persistent_pref_store.h",
"prefs/pref_change_registrar.cc",
"prefs/pref_change_registrar.h",
- "prefs/pref_filter.h",
"prefs/pref_member.cc",
"prefs/pref_member.h",
- "prefs/pref_notifier.h",
"prefs/pref_notifier_impl.cc",
"prefs/pref_notifier_impl.h",
- "prefs/pref_observer.h",
"prefs/pref_registry.cc",
"prefs/pref_registry.h",
"prefs/pref_registry_simple.cc",
@@ -1048,8 +1110,17 @@ component("prefs") {
"prefs/scoped_user_pref_update.h",
"prefs/value_map_pref_store.cc",
"prefs/value_map_pref_store.h",
- "prefs/writeable_pref_store.h",
]
+ if (!is_ios) {
+ sources += [
+ "prefs/base_prefs_export.h",
+ "prefs/persistent_pref_store.h",
+ "prefs/pref_filter.h",
+ "prefs/pref_notifier.h",
+ "prefs/pref_observer.h",
+ "prefs/writeable_pref_store.h",
+ ]
+ }
defines = [ "BASE_PREFS_IMPLEMENTATION" ]
@@ -1058,7 +1129,7 @@ component("prefs") {
]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
}
@@ -1115,6 +1186,9 @@ if (is_win) {
"cfgmgr32.lib",
"shell32.lib",
]
+ deps = [
+ "//build/config/sanitizers:deps",
+ ]
}
}
@@ -1138,7 +1212,6 @@ test("base_unittests") {
"android/path_utils_unittest.cc",
"android/scoped_java_ref_unittest.cc",
"android/sys_utils_unittest.cc",
- "async_socket_io_handler_unittest.cc",
"at_exit_unittest.cc",
"atomicops_unittest.cc",
"barrier_closure_unittest.cc",
@@ -1181,6 +1254,7 @@ test("base_unittests") {
"files/file_util_proxy_unittest.cc",
"files/file_util_unittest.cc",
"files/important_file_writer_unittest.cc",
+ "files/memory_mapped_file_unittest.cc",
"files/scoped_temp_dir_unittest.cc",
"gmock_unittest.cc",
"guid_unittest.cc",
@@ -1190,6 +1264,7 @@ test("base_unittests") {
"i18n/char_iterator_unittest.cc",
"i18n/file_util_icu_unittest.cc",
"i18n/icu_string_conversions_unittest.cc",
+ "i18n/message_formatter_unittest.cc",
"i18n/number_formatting_unittest.cc",
"i18n/rtl_unittest.cc",
"i18n/streaming_utf8_validator_unittest.cc",
@@ -1228,6 +1303,7 @@ test("base_unittests") {
"memory/scoped_ptr_unittest.cc",
"memory/scoped_ptr_unittest.nc",
"memory/scoped_vector_unittest.cc",
+ "memory/shared_memory_mac_unittest.cc",
"memory/shared_memory_unittest.cc",
"memory/singleton_unittest.cc",
"memory/weak_ptr_unittest.cc",
@@ -1240,6 +1316,7 @@ test("base_unittests") {
"metrics/field_trial_unittest.cc",
"metrics/histogram_base_unittest.cc",
"metrics/histogram_delta_serialization_unittest.cc",
+ "metrics/histogram_macros_unittest.cc",
"metrics/histogram_snapshot_manager_unittest.cc",
"metrics/histogram_unittest.cc",
"metrics/sample_map_unittest.cc",
@@ -1272,7 +1349,6 @@ test("base_unittests") {
"process/process_metrics_unittest_ios.cc",
"process/process_unittest.cc",
"process/process_util_unittest.cc",
- "process/process_util_unittest_ios.cc",
"profiler/stack_sampling_profiler_unittest.cc",
"profiler/tracked_time_unittest.cc",
"rand_util_unittest.cc",
@@ -1310,9 +1386,10 @@ test("base_unittests") {
"task/cancelable_task_tracker_unittest.cc",
"task_runner_util_unittest.cc",
"template_util_unittest.cc",
- "test/expectations/expectation_unittest.cc",
- "test/expectations/parser_unittest.cc",
"test/histogram_tester_unittest.cc",
+ "test/icu_test_util.cc",
+ "test/icu_test_util.h",
+ "test/test_pending_task_unittest.cc",
"test/test_reg_util_win_unittest.cc",
"test/trace_event_analyzer_unittest.cc",
"test/user_action_tester_unittest.cc",
@@ -1354,6 +1431,7 @@ test("base_unittests") {
"win/registry_unittest.cc",
"win/scoped_bstr_unittest.cc",
"win/scoped_comptr_unittest.cc",
+ "win/scoped_handle_unittest.cc",
"win/scoped_process_information_unittest.cc",
"win/scoped_variant_unittest.cc",
"win/shortcut_unittest.cc",
@@ -1380,9 +1458,6 @@ test("base_unittests") {
data = [
"test/data/",
-
- # TODO(dpranke): Remove when icu declares this directly.
- "$root_out_dir/icudtl.dat",
]
# Allow more direct string conversions on platforms with native utf8
@@ -1396,14 +1471,21 @@ test("base_unittests") {
":base_java",
":base_java_unittest_support",
]
+
+ # TODO(brettw) I think this should not be here, we should not be using
+ # isolate files.
isolate_file = "base_unittests.isolate"
}
if (is_ios) {
sources -= [
+ "files/file_path_watcher_unittest.cc",
+ "memory/discardable_shared_memory_unittest.cc",
+ "memory/shared_memory_unittest.cc",
"process/memory_unittest.cc",
"process/process_unittest.cc",
"process/process_util_unittest.cc",
+ "sync_socket_unittest.cc",
]
# Pull in specific Mac files for iOS (which have been filtered out by file
@@ -1414,7 +1496,7 @@ test("base_unittests") {
"mac/foundation_util_unittest.mm",
"mac/objc_property_releaser_unittest.mm",
"mac/scoped_nsobject_unittest.mm",
- "sys_string_conversions_mac_unittest.mm",
+ "strings/sys_string_conversions_mac_unittest.mm",
]
set_sources_assignment_filter(sources_assignment_filter)
@@ -1435,7 +1517,7 @@ test("base_unittests") {
sources -= [ "message_loop/message_pump_glib_unittest.cc" ]
}
- if (is_posix || is_ios) {
+ if (is_posix && !is_ios) {
sources += [ "message_loop/message_pump_libevent_unittest.cc" ]
deps += [ "//third_party/libevent" ]
}
@@ -1449,6 +1531,15 @@ test("base_unittests") {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+ # Symbols for crashes when running tests on swarming.
+ if (symbol_level > 0) {
+ if (is_win) {
+ data += [ "$root_out_dir/base_unittests.exe.pdb" ]
+ } else if (is_mac) {
+ data += [ "$root_out_dir/base_unittests.dSYM/" ]
+ }
+ }
}
if (is_android) {
@@ -1481,7 +1572,7 @@ if (is_android) {
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
]
- deps = [
+ public_deps = [
":android_runtime_jni_headers",
]
@@ -1502,6 +1593,7 @@ if (is_android) {
]
deps = [
+ "//third_party/android_tools:android_support_multidex_java",
"//third_party/jsr-305:jsr_305_javalib",
]
@@ -1533,12 +1625,29 @@ if (is_android) {
DEPRECATED_java_in_dir = "test/android/javatests/src"
}
+ # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
+ # in the multidex shadow library. crbug.com/522043
+ # GYP: //base.gyp:base_junit_test_support
+ java_library("base_junit_test_support") {
+ testonly = true
+ java_files = [ "test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java" ]
+ deps = [
+ "//third_party/android_tools:android_support_multidex_java",
+ "//third_party/robolectric:android-all-4.3_r2-robolectric-0",
+ "//third_party/robolectric:robolectric_java",
+ ]
+ }
+
# GYP: //base.gyp:base_junit_tests
junit_binary("base_junit_tests") {
- java_files = [ "android/junit/src/org/chromium/base/LogTest.java" ]
+ java_files = [
+ "android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
+ "android/junit/src/org/chromium/base/LogTest.java",
+ ]
deps = [
":base_java",
":base_java_test_support",
+ ":base_junit_test_support",
]
}
diff --git a/chromium/base/DEPS b/chromium/base/DEPS
index c632e35d83a..6d91c8d947f 100644
--- a/chromium/base/DEPS
+++ b/chromium/base/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+third_party/apple_apsl",
"+third_party/libevent",
"+third_party/dmg_fp",
+ "+third_party/lss",
"+third_party/mach_override",
"+third_party/modp_b64",
"+third_party/tcmalloc",
diff --git a/chromium/base/OWNERS b/chromium/base/OWNERS
index bcaa81bb64d..76ffc068ba0 100644
--- a/chromium/base/OWNERS
+++ b/chromium/base/OWNERS
@@ -1,7 +1,6 @@
mark@chromium.org
thakis@chromium.org
danakj@chromium.org
-rvargas@chromium.org
thestig@chromium.org
# Chromium is a very mature project, most things that are generally useful are
@@ -26,3 +25,7 @@ per-file security_unittest.cc=jln@chromium.org
per-file *android*=nyquist@chromium.org
per-file *android*=rmcilroy@chromium.org
per-file *android*=yfriedman@chromium.org
+
+# For FeatureList API:
+per-file feature_list*=asvitkine@chromium.org
+per-file feature_list*=isherman@chromium.org
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index c42de1a01a3..32e5e6cdc65 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -4,14 +4,47 @@
import("//build/config/allocator.gni")
+declare_args() {
+ # Provide a way to force disable debugallocation in Debug builds,
+ # e.g. for profiling (it's more rare to profile Debug builds,
+ # but people sometimes need to do that).
+ enable_debugallocation = is_debug
+}
+
# Only executables and not libraries should depend on the allocator target;
# only the application (the final executable) knows what allocator makes sense.
# This "allocator" meta-target will forward to the default allocator according
# to the build settings.
group("allocator") {
+ public_deps = []
if (use_allocator == "tcmalloc") {
- deps = [
- ":tcmalloc",
+ public_deps += [ ":tcmalloc" ]
+ }
+
+ # This condition expresses the win_use_allocator_shim in the GYP build.
+ if (is_win && !is_component_build) {
+ public_deps += [ ":allocator_shim" ]
+ }
+}
+
+# This config defines ALLOCATOR_SHIM in the same conditions that the allocator
+# shim will be used by the allocator target.
+#
+# TODO(brettw) this is only used in one place and is kind of mess, because it
+# assumes that the library using it will eventually be linked with
+# //base/allocator in the default way. Clean this up and delete this.
+config("allocator_shim_define") {
+ if (is_win && !is_component_build) {
+ defines = [ "ALLOCATOR_SHIM" ]
+ }
+}
+
+config("tcmalloc_flags") {
+ if (enable_debugallocation) {
+ defines = [
+ # Use debugallocation for Debug builds to catch problems early
+ # and cleanly, http://crbug.com/30715 .
+ "TCMALLOC_FOR_DEBUGALLOCATION",
]
}
}
@@ -28,16 +61,31 @@ if (is_win) {
libs = [ rebase_path("$target_gen_dir/allocator/libcmt.lib") ]
}
- action("prep_libc") {
- script = "prep_libc.py"
- outputs = [
- "$target_gen_dir/allocator/libcmt.lib",
- ]
- args = [
- visual_studio_path + "/vc/lib",
- rebase_path("$target_gen_dir/allocator"),
- current_cpu,
- ]
+ if (!is_component_build) {
+ action("prep_libc") {
+ script = "prep_libc.py"
+ outputs = [
+ "$target_gen_dir/allocator/libcmt.lib",
+ ]
+ args = [
+ visual_studio_path + "/vc/lib",
+ rebase_path("$target_gen_dir/allocator"),
+ current_cpu,
+ ]
+ }
+
+ source_set("allocator_shim") {
+ sources = [
+ "allocator_shim_win.cc",
+ ]
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+
+ public_configs = [ ":nocmt" ]
+ deps = [
+ ":prep_libc",
+ ]
+ }
}
}
@@ -52,7 +100,7 @@ if (use_allocator == "tcmalloc") {
check_includes = false
sources = [
- # Generated for our configuration from tcmalloc"s build
+ # Generated for our configuration from tcmalloc's build
# and checked in.
"$tcmalloc_dir/src/config.h",
"$tcmalloc_dir/src/config_android.h",
@@ -103,8 +151,6 @@ if (use_allocator == "tcmalloc") {
# #included by debugallocation_shim.cc
#"$tcmalloc_dir/src/debugallocation.cc",
- "$tcmalloc_dir/src/deep-heap-profile.cc",
- "$tcmalloc_dir/src/deep-heap-profile.h",
"$tcmalloc_dir/src/free_list.cc",
"$tcmalloc_dir/src/free_list.h",
"$tcmalloc_dir/src/heap-profile-table.cc",
@@ -149,7 +195,6 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/thread_cache.h",
"$tcmalloc_dir/src/windows/port.cc",
"$tcmalloc_dir/src/windows/port.h",
- "allocator_shim.cc",
"debugallocation_shim.cc",
# These are both #included by allocator_shim for maximal linking.
@@ -167,7 +212,10 @@ if (use_allocator == "tcmalloc") {
]
configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ "//build/config/compiler:no_chromium_code",
+ ":tcmalloc_flags",
+ ]
deps = []
@@ -185,9 +233,6 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/system-alloc.cc",
"$tcmalloc_dir/src/system-alloc.h",
- # included by allocator_shim.cc
- "debugallocation_shim.cc",
-
# cpuprofiler
"$tcmalloc_dir/src/base/thread_lister.c",
"$tcmalloc_dir/src/base/thread_lister.h",
@@ -215,9 +260,6 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src/system-alloc.h",
"$tcmalloc_dir/src/windows/port.cc",
"$tcmalloc_dir/src/windows/port.h",
-
- # TODO(willchan): Support allocator shim later on.
- "allocator_shim.cc",
]
# We enable all warnings by default, but upstream disables a few.
@@ -245,7 +287,7 @@ if (use_allocator == "tcmalloc") {
# Make sure the allocation library is optimized as much as possible when
# we"re in release mode.
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
@@ -264,7 +306,7 @@ source_set("allocator_extension_thunks") {
"allocator_extension_thunks.h",
]
if (is_android && !is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
}
diff --git a/chromium/base/allocator/README b/chromium/base/allocator/README
index ec8a707f413..8a5595fb267 100644
--- a/chromium/base/allocator/README
+++ b/chromium/base/allocator/README
@@ -7,7 +7,6 @@ Currently we can, at runtime, switch between:
the default windows allocator
the windows low-fragmentation-heap
tcmalloc
- jemalloc (the heap used most notably within Mozilla Firefox)
The mechanism for hooking LIBCMT in windows is rather tricky. The core
problem is that by default, the windows library does not declare malloc and
@@ -23,11 +22,10 @@ Source code
This directory contains just the allocator (i.e. shim) layer that switches
between the different underlying memory allocation implementations.
-The tcmalloc and jemalloc libraries originate outside of Chromium
-and exist in ../../third_party/tcmalloc and ../../third_party/jemalloc
-(currently, the actual locations are defined in the allocator.gyp file).
-The third party sources use a vendor-branch SCM pattern to track
-Chromium-specific changes independently from upstream changes.
+The tcmalloc library originates outside of Chromium and exists in
+../../third_party/tcmalloc (currently, the actual location is defined in the
+allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
+track Chromium-specific changes independently from upstream changes.
The general intent is to push local changes upstream so that over
time we no longer need any forked files.
@@ -54,6 +52,5 @@ Usage
You can use the different allocators by setting the environment variable
CHROME_ALLOCATOR to:
"tcmalloc" - TC Malloc (default)
- "jemalloc" - JE Malloc
"winheap" - Windows default heap
"winlfh" - Windows Low-Fragmentation heap
diff --git a/chromium/base/allocator/allocator.gyp b/chromium/base/allocator/allocator.gyp
index d426c9c3f17..ae93e9e03ca 100644
--- a/chromium/base/allocator/allocator.gyp
+++ b/chromium/base/allocator/allocator.gyp
@@ -153,8 +153,6 @@
'<(tcmalloc_dir)/src/common.cc',
'<(tcmalloc_dir)/src/common.h',
'<(tcmalloc_dir)/src/debugallocation.cc',
- '<(tcmalloc_dir)/src/deep-heap-profile.cc',
- '<(tcmalloc_dir)/src/deep-heap-profile.h',
'<(tcmalloc_dir)/src/free_list.cc',
'<(tcmalloc_dir)/src/free_list.h',
'<(tcmalloc_dir)/src/getpc.h',
@@ -296,20 +294,7 @@
'../..',
],
}],
- ['OS=="linux" and clang_type_profiler==1', {
- 'dependencies': [
- 'type_profiler_tcmalloc',
- ],
- # It is undoing dependencies and cflags_cc for type_profiler which
- # build/common.gypi injects into all targets.
- 'dependencies!': [
- 'type_profiler',
- ],
- 'cflags_cc!': [
- '-fintercept-allocation-functions',
- ],
- }],
- ['OS=="win"', {
+ ['OS=="win" and component!="shared_library"', {
'dependencies': [
'libcmt',
],
@@ -382,22 +367,10 @@
'include_dirs': [
'../../'
],
- 'conditions': [
- ['OS=="linux" and clang_type_profiler==1', {
- # It is undoing dependencies and cflags_cc for type_profiler which
- # build/common.gypi injects into all targets.
- 'dependencies!': [
- 'type_profiler',
- ],
- 'cflags_cc!': [
- '-fintercept-allocation-functions',
- ],
- }],
- ],
},
],
'conditions': [
- ['OS=="win"', {
+ ['OS=="win" and component!="shared_library"', {
'targets': [
{
'target_name': 'libcmt',
@@ -462,93 +435,6 @@
},
],
}],
- ['OS=="linux" and clang_type_profiler==1', {
- # Some targets in this section undo dependencies and cflags_cc for
- # type_profiler which build/common.gypi injects into all targets.
- 'targets': [
- {
- 'target_name': 'type_profiler',
- 'type': 'static_library',
- 'dependencies!': [
- 'type_profiler',
- ],
- 'cflags_cc!': [
- '-fintercept-allocation-functions',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'type_profiler.cc',
- 'type_profiler.h',
- 'type_profiler_control.h',
- ],
- 'toolsets': ['host', 'target'],
- },
- {
- 'target_name': 'type_profiler_tcmalloc',
- 'type': 'static_library',
- 'dependencies!': [
- 'type_profiler',
- ],
- 'cflags_cc!': [
- '-fintercept-allocation-functions',
- ],
- 'include_dirs': [
- '<(tcmalloc_dir)/src',
- '../..',
- ],
- 'sources': [
- '<(tcmalloc_dir)/src/gperftools/type_profiler_map.h',
- '<(tcmalloc_dir)/src/type_profiler_map.cc',
- 'type_profiler_tcmalloc.cc',
- 'type_profiler_tcmalloc.h',
- ],
- },
- {
- 'target_name': 'type_profiler_unittests',
- 'type': 'executable',
- 'dependencies': [
- '../../testing/gtest.gyp:gtest',
- '../base.gyp:base',
- 'allocator',
- 'type_profiler_tcmalloc',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [
- 'type_profiler_control.cc',
- 'type_profiler_control.h',
- 'type_profiler_unittest.cc',
- ],
- },
- {
- 'target_name': 'type_profiler_map_unittests',
- 'type': 'executable',
- 'dependencies': [
- '../../testing/gtest.gyp:gtest',
- '../base.gyp:base',
- 'allocator',
- ],
- 'dependencies!': [
- 'type_profiler',
- ],
- 'cflags_cc!': [
- '-fintercept-allocation-functions',
- ],
- 'include_dirs': [
- '<(tcmalloc_dir)/src',
- '../..',
- ],
- 'sources': [
- '<(tcmalloc_dir)/src/gperftools/type_profiler_map.h',
- '<(tcmalloc_dir)/src/type_profiler_map.cc',
- 'type_profiler_map_unittest.cc',
- ],
- },
- ],
- }],
['use_allocator=="tcmalloc"', {
'targets': [
{
diff --git a/chromium/base/allocator/allocator_shim_win.cc b/chromium/base/allocator/allocator_shim_win.cc
index a1473e5fd29..2a933ee93db 100644
--- a/chromium/base/allocator/allocator_shim_win.cc
+++ b/chromium/base/allocator/allocator_shim_win.cc
@@ -172,6 +172,9 @@ void* malloc(size_t size) {
return ptr;
}
+// Symbol to allow weak linkage to win_heap_malloc from memory_win.cc.
+void* (*malloc_unchecked)(size_t) = &win_heap_malloc;
+
// free.c
void free(void* p) {
win_heap_free(p);
diff --git a/chromium/base/allocator/type_profiler.cc b/chromium/base/allocator/type_profiler.cc
deleted file mode 100644
index 635fbcf5ed9..00000000000
--- a/chromium/base/allocator/type_profiler.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/allocator/type_profiler.h"
-
-#include <assert.h>
-
-namespace {
-
-void* NopIntercept(void* ptr, size_t size, const std::type_info& type) {
- return ptr;
-}
-
-base::type_profiler::InterceptFunction* g_new_intercept = NopIntercept;
-base::type_profiler::InterceptFunction* g_delete_intercept = NopIntercept;
-
-}
-
-void* __op_new_intercept__(void* ptr,
- size_t size,
- const std::type_info& type) {
- return g_new_intercept(ptr, size, type);
-}
-
-void* __op_delete_intercept__(void* ptr,
- size_t size,
- const std::type_info& type) {
- return g_delete_intercept(ptr, size, type);
-}
-
-namespace base {
-namespace type_profiler {
-
-// static
-void InterceptFunctions::SetFunctions(InterceptFunction* new_intercept,
- InterceptFunction* delete_intercept) {
- // Don't use DCHECK, as this file is injected into targets
- // that do not and should not depend on base/base.gyp:base
- assert(g_new_intercept == NopIntercept);
- assert(g_delete_intercept == NopIntercept);
-
- g_new_intercept = new_intercept;
- g_delete_intercept = delete_intercept;
-}
-
-// static
-void InterceptFunctions::ResetFunctions() {
- g_new_intercept = NopIntercept;
- g_delete_intercept = NopIntercept;
-}
-
-// static
-bool InterceptFunctions::IsAvailable() {
- return g_new_intercept != NopIntercept || g_delete_intercept != NopIntercept;
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
diff --git a/chromium/base/allocator/type_profiler.h b/chromium/base/allocator/type_profiler.h
deleted file mode 100644
index 86b5711a9d0..00000000000
--- a/chromium/base/allocator/type_profiler.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_TYPE_PROFILER_H_
-#define BASE_ALLOCATOR_TYPE_PROFILER_H_
-
-#if defined(TYPE_PROFILING)
-
-#include <stddef.h> // for size_t
-#include <typeinfo> // for std::typeinfo
-
-namespace base {
-namespace type_profiler {
-
-typedef void* InterceptFunction(void*, size_t, const std::type_info&);
-
-class InterceptFunctions {
- public:
- // It must be called only once in a process while it is in single-thread.
- // For now, ContentMainRunnerImpl::Initialize is the only supposed caller
- // of this function except for single-threaded unit tests.
- static void SetFunctions(InterceptFunction* new_intercept,
- InterceptFunction* delete_intercept);
-
- private:
- friend class TypeProfilerTest;
-
- // These functions are not thread safe.
- // They must be used only from single-threaded unit tests.
- static void ResetFunctions();
- static bool IsAvailable();
-};
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-#endif // BASE_ALLOCATOR_TYPE_PROFILER_H_
diff --git a/chromium/base/allocator/type_profiler_control.cc b/chromium/base/allocator/type_profiler_control.cc
deleted file mode 100644
index 6be79840ed3..00000000000
--- a/chromium/base/allocator/type_profiler_control.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/type_profiler_control.h"
-
-namespace base {
-namespace type_profiler {
-
-namespace {
-
-#if defined(TYPE_PROFILING)
-const bool kTypeProfilingEnabled = true;
-#else
-const bool kTypeProfilingEnabled = false;
-#endif
-
-bool g_enable_intercept = kTypeProfilingEnabled;
-
-} // namespace
-
-// static
-void Controller::Stop() {
- g_enable_intercept = false;
-}
-
-// static
-bool Controller::IsProfiling() {
- return kTypeProfilingEnabled && g_enable_intercept;
-}
-
-// static
-void Controller::Restart() {
- g_enable_intercept = kTypeProfilingEnabled;
-}
-
-} // namespace type_profiler
-} // namespace base
diff --git a/chromium/base/allocator/type_profiler_control.h b/chromium/base/allocator/type_profiler_control.h
deleted file mode 100644
index 17cf5b65e4a..00000000000
--- a/chromium/base/allocator/type_profiler_control.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
-#define BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
-
-#include "base/gtest_prod_util.h"
-
-namespace base {
-namespace type_profiler {
-
-class Controller {
- public:
- static void Stop();
- static bool IsProfiling();
-
- private:
- FRIEND_TEST_ALL_PREFIXES(TypeProfilerTest,
- TestProfileNewWithoutProfiledDelete);
-
- // It must be used only from allowed unit tests. The following is only
- // allowed for use in unit tests. Profiling should never be restarted in
- // regular use.
- static void Restart();
-};
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // BASE_ALLOCATOR_TYPE_PROFILER_CONTROL_H_
diff --git a/chromium/base/allocator/type_profiler_map_unittest.cc b/chromium/base/allocator/type_profiler_map_unittest.cc
deleted file mode 100644
index 514ec164e81..00000000000
--- a/chromium/base/allocator/type_profiler_map_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a unittest set for type_profiler_map in third_party/tcmalloc. It is
-// independent from other tests and executed manually like allocator_unittests
-// since type_profiler_map is a singleton (like TCMalloc's heap-profiler), and
-// it requires RTTI and different compiling/linking options from others.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/memory/scoped_ptr.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/type_profiler_map.h"
-
-namespace base {
-namespace type_profiler {
-
-static const void* const g_const_null = static_cast<const void*>(NULL);
-
-TEST(TypeProfilerMapTest, NormalOperation) {
- // Allocate an object just to get a valid address.
- // This 'new' is not profiled by type_profiler.
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
-
- InsertType(dummy.get(), 12, typeid(int));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
-}
-
-TEST(TypeProfilerMapTest, EraseWithoutInsert) {
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- for (int i = 0; i < 10; ++i) {
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
- }
-}
-
-TEST(TypeProfilerMapTest, InsertThenMultipleErase) {
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- InsertType(dummy.get(), 12, typeid(int));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- for (int i = 0; i < 10; ++i) {
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
- }
-}
-
-TEST(TypeProfilerMapTest, MultipleInsertWithoutErase) {
- scoped_ptr<int> dummy(new int(48));
- const std::type_info* type;
-
- InsertType(dummy.get(), 12, typeid(int));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- InsertType(dummy.get(), 5, typeid(char));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(char).name(), type->name());
-
- InsertType(dummy.get(), 129, typeid(long));
- type = LookupType(dummy.get());
- ASSERT_NE(g_const_null, type);
- EXPECT_STREQ(typeid(long).name(), type->name());
-
- EraseType(dummy.get());
- type = LookupType(dummy.get());
- EXPECT_EQ(g_const_null, type);
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
diff --git a/chromium/base/allocator/type_profiler_tcmalloc.cc b/chromium/base/allocator/type_profiler_tcmalloc.cc
deleted file mode 100644
index e5e10e0d129..00000000000
--- a/chromium/base/allocator/type_profiler_tcmalloc.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/allocator/type_profiler_tcmalloc.h"
-
-#include "base/allocator/type_profiler_control.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/type_profiler_map.h"
-
-namespace base {
-namespace type_profiler {
-
-void* NewInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type) {
- if (Controller::IsProfiling())
- InsertType(ptr, size, type);
-
- return ptr;
-}
-
-void* DeleteInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type) {
- if (Controller::IsProfiling())
- EraseType(ptr);
-
- return ptr;
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
diff --git a/chromium/base/allocator/type_profiler_tcmalloc.h b/chromium/base/allocator/type_profiler_tcmalloc.h
deleted file mode 100644
index ac55995c824..00000000000
--- a/chromium/base/allocator/type_profiler_tcmalloc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_TYPE_PROFILER_TCMALLOC_H_
-#define BASE_ALLOCATOR_TYPE_PROFILER_TCMALLOC_H_
-
-#if defined(TYPE_PROFILING)
-
-#include <cstddef> // for size_t
-#include <typeinfo> // for std::type_info
-
-namespace base {
-namespace type_profiler {
-
-void* NewInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type);
-
-void* DeleteInterceptForTCMalloc(void* ptr,
- size_t size,
- const std::type_info& type);
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-#endif // BASE_ALLOCATOR_TYPE_PROFILER_TCMALLOC_H_
diff --git a/chromium/base/allocator/type_profiler_unittest.cc b/chromium/base/allocator/type_profiler_unittest.cc
deleted file mode 100644
index 3d7369c3802..00000000000
--- a/chromium/base/allocator/type_profiler_unittest.cc
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a unittest set for type_profiler. It is independent from other
-// tests and executed manually like allocator_unittests since type_profiler_map
-// used in type_profiler is a singleton (like TCMalloc's heap-profiler), and
-// it requires RTTI and different compiling/linking options from others
-//
-// It tests that the profiler doesn't fail in suspicous cases. For example,
-// 'new' is not profiled, but 'delete' for the created object is profiled.
-
-#if defined(TYPE_PROFILING)
-
-#include "base/allocator/type_profiler.h"
-#include "base/allocator/type_profiler_control.h"
-#include "base/allocator/type_profiler_tcmalloc.h"
-#include "base/basictypes.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/tcmalloc/chromium/src/gperftools/type_profiler_map.h"
-
-namespace base {
-namespace type_profiler {
-
-class TypeProfilerTest : public testing::Test {
- public:
- TypeProfilerTest() {}
-
- void SetInterceptFunctions() {
- InterceptFunctions::SetFunctions(NewInterceptForTCMalloc,
- DeleteInterceptForTCMalloc);
- }
-
- void ResetInterceptFunctions() {
- InterceptFunctions::ResetFunctions();
- }
-
- void SetUp() {
- SetInterceptFunctions();
- }
-
- void TearDown() {
- ResetInterceptFunctions();
- }
-
- protected:
- static const size_t kDummyArraySize;
- static const void* const kConstNull;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TypeProfilerTest);
-};
-
-const size_t TypeProfilerTest::kDummyArraySize = 10;
-const void* const TypeProfilerTest::kConstNull = static_cast<const void*>(NULL);
-
-TEST_F(TypeProfilerTest, TestNormalProfiling) {
- int* dummy = new int(48);
- const std::type_info* type;
-
- type = LookupType(dummy);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
- delete dummy;
-
- type = LookupType(dummy);
- EXPECT_EQ(kConstNull, type);
-}
-
-TEST_F(TypeProfilerTest, TestNormalArrayProfiling) {
- int* dummy = new int[kDummyArraySize];
- const std::type_info* type;
-
- type = LookupType(dummy);
- ASSERT_NE(kConstNull, type);
- // For an array, the profiler remembers its base type.
- EXPECT_STREQ(typeid(int).name(), type->name());
- delete[] dummy;
-
- type = LookupType(dummy);
- EXPECT_EQ(kConstNull, type);
-}
-
-TEST_F(TypeProfilerTest, TestRepeatedNewAndDelete) {
- int *dummy[kDummyArraySize];
- const std::type_info* type;
- for (int i = 0; i < kDummyArraySize; ++i)
- dummy[i] = new int(i);
-
- for (int i = 0; i < kDummyArraySize; ++i) {
- type = LookupType(dummy[i]);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
- }
-
- for (int i = 0; i < kDummyArraySize; ++i) {
- delete dummy[i];
- type = LookupType(dummy[i]);
- ASSERT_EQ(kConstNull, type);
- }
-}
-
-TEST_F(TypeProfilerTest, TestMultipleNewWithDroppingDelete) {
- static const size_t large_size = 256 * 1024;
-
- char* dummy_char = new char[large_size / sizeof(*dummy_char)];
- const std::type_info* type;
-
- type = LookupType(dummy_char);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(char).name(), type->name());
-
- // Call "::operator delete" directly to drop __op_delete_intercept__.
- ::operator delete[](dummy_char);
-
- type = LookupType(dummy_char);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(char).name(), type->name());
-
- // Allocates a little different size.
- int* dummy_int = new int[large_size / sizeof(*dummy_int) - 1];
-
- // We expect that tcmalloc returns the same address for these large (over 32k)
- // allocation calls. It usually happens, but maybe probablistic.
- ASSERT_EQ(static_cast<void*>(dummy_char), static_cast<void*>(dummy_int)) <<
- "two new (malloc) calls didn't return the same address; retry it.";
-
- type = LookupType(dummy_int);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- delete[] dummy_int;
-
- type = LookupType(dummy_int);
- EXPECT_EQ(kConstNull, type);
-}
-
-TEST_F(TypeProfilerTest, TestProfileDeleteWithoutProfiledNew) {
- // 'dummy' should be new'ed in this test before intercept functions are set.
- ResetInterceptFunctions();
-
- int* dummy = new int(48);
- const std::type_info* type;
-
- // Set intercept functions again after 'dummy' is new'ed.
- SetInterceptFunctions();
-
- delete dummy;
-
- type = LookupType(dummy);
- EXPECT_EQ(kConstNull, type);
-
- ResetInterceptFunctions();
-}
-
-TEST_F(TypeProfilerTest, TestProfileNewWithoutProfiledDelete) {
- int* dummy = new int(48);
- const std::type_info* type;
-
- EXPECT_TRUE(Controller::IsProfiling());
-
- // Stop profiling before deleting 'dummy'.
- Controller::Stop();
- EXPECT_FALSE(Controller::IsProfiling());
-
- delete dummy;
-
- // NOTE: We accept that a profile entry remains when a profiled object is
- // deleted after Controller::Stop().
- type = LookupType(dummy);
- ASSERT_NE(kConstNull, type);
- EXPECT_STREQ(typeid(int).name(), type->name());
-
- Controller::Restart();
- EXPECT_TRUE(Controller::IsProfiling());
-
- // Remove manually since 'dummy' is not removed from type_profiler_map.
- EraseType(dummy);
-}
-
-} // namespace type_profiler
-} // namespace base
-
-#endif // defined(TYPE_PROFILING)
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
diff --git a/chromium/base/android/animation_frame_time_histogram.cc b/chromium/base/android/animation_frame_time_histogram.cc
index 0d796193829..2cf7516ff2b 100644
--- a/chromium/base/android/animation_frame_time_histogram.cc
+++ b/chromium/base/android/animation_frame_time_histogram.cc
@@ -10,9 +10,9 @@
// static
void SaveHistogram(JNIEnv* env,
- jobject jcaller,
- jstring j_histogram_name,
- jlongArray j_frame_times_ms,
+ const JavaParamRef<jobject>& jcaller,
+ const JavaParamRef<jstring>& j_histogram_name,
+ const JavaParamRef<jlongArray>& j_frame_times_ms,
jint j_count) {
jlong *frame_times_ms = env->GetLongArrayElements(j_frame_times_ms, NULL);
std::string histogram_name = base::android::ConvertJavaStringToUTF8(
diff --git a/chromium/base/android/apk_assets.cc b/chromium/base/android/apk_assets.cc
index bcdac6db6ec..fab7fd017f4 100644
--- a/chromium/base/android/apk_assets.cc
+++ b/chromium/base/android/apk_assets.cc
@@ -26,7 +26,7 @@ int OpenApkAsset(const std::string& file_path,
ScopedJavaLocalRef<jlongArray> jarr = Java_ApkAssets_open(
env,
base::android::GetApplicationContext(),
- base::android::ConvertUTF8ToJavaString(env, file_path).Release());
+ base::android::ConvertUTF8ToJavaString(env, file_path).obj());
std::vector<jlong> results;
base::android::JavaLongArrayToLongVector(env, jarr.obj(), &results);
CHECK_EQ(3U, results.size());
diff --git a/chromium/base/android/application_status_listener.cc b/chromium/base/android/application_status_listener.cc
index 3e6fbf833ce..5aee781faab 100644
--- a/chromium/base/android/application_status_listener.cc
+++ b/chromium/base/android/application_status_listener.cc
@@ -66,7 +66,7 @@ void ApplicationStatusListener::NotifyApplicationStateChange(
}
static void OnApplicationStateChange(JNIEnv* env,
- jclass clazz,
+ const JavaParamRef<jclass>& clazz,
jint new_state) {
ApplicationState application_state = static_cast<ApplicationState>(new_state);
ApplicationStatusListener::NotifyApplicationStateChange(application_state);
diff --git a/chromium/base/android/build_info.h b/chromium/base/android/build_info.h
index 834c98dddf1..cc90df22016 100644
--- a/chromium/base/android/build_info.h
+++ b/chromium/base/android/build_info.h
@@ -24,7 +24,8 @@ enum SdkVersion {
SDK_VERSION_KITKAT = 19,
SDK_VERSION_KITKAT_WEAR = 20,
SDK_VERSION_LOLLIPOP = 21,
- SDK_VERSION_LOLLIPOP_MR1 = 22
+ SDK_VERSION_LOLLIPOP_MR1 = 22,
+ SDK_VERSION_MARSHMALLOW = 23
};
// BuildInfo is a singleton class that stores android build and device
diff --git a/chromium/base/android/command_line_android.cc b/chromium/base/android/command_line_android.cc
index 064450dd725..e196aedd82d 100644
--- a/chromium/base/android/command_line_android.cc
+++ b/chromium/base/android/command_line_android.cc
@@ -31,40 +31,50 @@ void AppendJavaStringArrayToCommandLine(JNIEnv* env,
} // namespace
-static void Reset(JNIEnv* env, jclass clazz) {
+static void Reset(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
CommandLine::Reset();
}
-static jboolean HasSwitch(JNIEnv* env, jclass clazz, jstring jswitch) {
+static jboolean HasSwitch(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jswitch) {
std::string switch_string(ConvertJavaStringToUTF8(env, jswitch));
return CommandLine::ForCurrentProcess()->HasSwitch(switch_string);
}
-static jstring GetSwitchValue(JNIEnv* env, jclass clazz, jstring jswitch) {
+static ScopedJavaLocalRef<jstring> GetSwitchValue(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jswitch) {
std::string switch_string(ConvertJavaStringToUTF8(env, jswitch));
std::string value(CommandLine::ForCurrentProcess()->GetSwitchValueNative(
switch_string));
if (value.empty())
- return 0;
- // OK to release, JNI binding.
- return ConvertUTF8ToJavaString(env, value).Release();
+ return ScopedJavaLocalRef<jstring>();
+ return ConvertUTF8ToJavaString(env, value);
}
-static void AppendSwitch(JNIEnv* env, jclass clazz, jstring jswitch) {
+static void AppendSwitch(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jswitch) {
std::string switch_string(ConvertJavaStringToUTF8(env, jswitch));
CommandLine::ForCurrentProcess()->AppendSwitch(switch_string);
}
-static void AppendSwitchWithValue(JNIEnv* env, jclass clazz,
- jstring jswitch, jstring jvalue) {
+static void AppendSwitchWithValue(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jswitch,
+ const JavaParamRef<jstring>& jvalue) {
std::string switch_string(ConvertJavaStringToUTF8(env, jswitch));
std::string value_string (ConvertJavaStringToUTF8(env, jvalue));
CommandLine::ForCurrentProcess()->AppendSwitchASCII(switch_string,
value_string);
}
-static void AppendSwitchesAndArguments(JNIEnv* env, jclass clazz,
- jobjectArray array) {
+static void AppendSwitchesAndArguments(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jobjectArray>& array) {
AppendJavaStringArrayToCommandLine(env, array, false);
}
diff --git a/chromium/base/android/cpu_features.cc b/chromium/base/android/cpu_features.cc
index 6a1869534b0..c139b7851a1 100644
--- a/chromium/base/android/cpu_features.cc
+++ b/chromium/base/android/cpu_features.cc
@@ -10,11 +10,11 @@
namespace base {
namespace android {
-jint GetCoreCount(JNIEnv*, jclass) {
+jint GetCoreCount(JNIEnv*, const JavaParamRef<jclass>&) {
return android_getCpuCount();
}
-jlong GetCpuFeatures(JNIEnv*, jclass) {
+jlong GetCpuFeatures(JNIEnv*, const JavaParamRef<jclass>&) {
return android_getCpuFeatures();
}
diff --git a/chromium/base/android/field_trial_list.cc b/chromium/base/android/field_trial_list.cc
index 9cb38d29106..9731a48e550 100644
--- a/chromium/base/android/field_trial_list.cc
+++ b/chromium/base/android/field_trial_list.cc
@@ -13,16 +13,18 @@
using base::android::ConvertJavaStringToUTF8;
using base::android::ConvertUTF8ToJavaString;
-static jstring FindFullName(JNIEnv* env,
- jclass clazz,
- jstring jtrial_name) {
+static ScopedJavaLocalRef<jstring> FindFullName(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jtrial_name) {
std::string trial_name(ConvertJavaStringToUTF8(env, jtrial_name));
return ConvertUTF8ToJavaString(
- env,
- base::FieldTrialList::FindFullName(trial_name)).Release();
+ env, base::FieldTrialList::FindFullName(trial_name));
}
-static jboolean TrialExists(JNIEnv* env, jclass clazz, jstring jtrial_name) {
+static jboolean TrialExists(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jtrial_name) {
std::string trial_name(ConvertJavaStringToUTF8(env, jtrial_name));
return base::FieldTrialList::TrialExists(trial_name);
}
diff --git a/chromium/base/android/important_file_writer_android.cc b/chromium/base/android/important_file_writer_android.cc
index bcbd785da04..f324738bb94 100644
--- a/chromium/base/android/important_file_writer_android.cc
+++ b/chromium/base/android/important_file_writer_android.cc
@@ -15,9 +15,9 @@ namespace base {
namespace android {
static jboolean WriteFileAtomically(JNIEnv* env,
- jclass /* clazz */,
- jstring file_name,
- jbyteArray data) {
+ const JavaParamRef<jclass>& /* clazz */,
+ const JavaParamRef<jstring>& file_name,
+ const JavaParamRef<jbyteArray>& data) {
// This is called on the UI thread during shutdown to save tab data, so
// needs to enable IO.
base::ThreadRestrictions::ScopedAllowIO allow_io;
diff --git a/chromium/base/android/library_loader/library_loader_hooks.cc b/chromium/base/android/library_loader/library_loader_hooks.cc
index 2ba78883471..67b7c5db1dd 100644
--- a/chromium/base/android/library_loader/library_loader_hooks.cc
+++ b/chromium/base/android/library_loader/library_loader_hooks.cc
@@ -57,7 +57,7 @@ long g_renderer_library_load_time_ms = 0;
static void RegisterChromiumAndroidLinkerRendererHistogram(
JNIEnv* env,
- jobject jcaller,
+ const JavaParamRef<jobject>& jcaller,
jboolean requested_shared_relro,
jboolean load_at_fixed_address_failed,
jlong library_load_time_ms) {
@@ -88,7 +88,7 @@ void RecordChromiumAndroidLinkerRendererHistogram() {
static void RecordChromiumAndroidLinkerBrowserHistogram(
JNIEnv* env,
- jobject jcaller,
+ const JavaParamRef<jobject>& jcaller,
jboolean is_using_browser_shared_relros,
jboolean load_at_fixed_address_failed,
jint library_load_from_apk_status,
@@ -120,13 +120,15 @@ void SetLibraryLoadedHook(LibraryLoadedHook* func) {
g_registration_callback = func;
}
-static void InitCommandLine(JNIEnv* env,
- jobject jcaller,
- jobjectArray init_command_line) {
+static void InitCommandLine(
+ JNIEnv* env,
+ const JavaParamRef<jobject>& jcaller,
+ const JavaParamRef<jobjectArray>& init_command_line) {
InitNativeCommandLineFromJavaArray(env, init_command_line);
}
-static jboolean LibraryLoaded(JNIEnv* env, jobject jcaller) {
+static jboolean LibraryLoaded(JNIEnv* env,
+ const JavaParamRef<jobject>& jcaller) {
if (g_registration_callback == NULL) {
return true;
}
@@ -140,7 +142,9 @@ void LibraryLoaderExitHook() {
}
}
-static jboolean ForkAndPrefetchNativeLibrary(JNIEnv* env, jclass clazz) {
+static jboolean ForkAndPrefetchNativeLibrary(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz) {
return NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary();
}
@@ -152,8 +156,10 @@ void SetVersionNumber(const char* version_number) {
g_library_version_number = strdup(version_number);
}
-jstring GetVersionNumber(JNIEnv* env, jobject jcaller) {
- return ConvertUTF8ToJavaString(env, g_library_version_number).Release();
+ScopedJavaLocalRef<jstring> GetVersionNumber(
+ JNIEnv* env,
+ const JavaParamRef<jobject>& jcaller) {
+ return ConvertUTF8ToJavaString(env, g_library_version_number);
}
LibraryProcessType GetLibraryProcessType(JNIEnv* env) {
diff --git a/chromium/base/android/library_loader/library_prefetcher.cc b/chromium/base/android/library_loader/library_prefetcher.cc
index 798a283d71b..118e80cc0e7 100644
--- a/chromium/base/android/library_loader/library_prefetcher.cc
+++ b/chromium/base/android/library_loader/library_prefetcher.cc
@@ -36,7 +36,7 @@ bool IsReadableAndPrivate(const base::debug::MappedMemoryRegion& region) {
bool PathMatchesSuffix(const std::string& path) {
for (size_t i = 0; i < arraysize(kSuffixesToMatch); i++) {
- if (EndsWith(path, kSuffixesToMatch[i], true)) {
+ if (EndsWith(path, kSuffixesToMatch[i], CompareCase::SENSITIVE)) {
return true;
}
}
@@ -82,14 +82,14 @@ void NativeLibraryPrefetcher::FilterLibchromeRangesOnlyIfPossible(
std::vector<AddressRange>* ranges) {
bool has_libchrome_region = false;
for (const base::debug::MappedMemoryRegion& region : regions) {
- if (EndsWith(region.path, kLibchromeSuffix, true)) {
+ if (EndsWith(region.path, kLibchromeSuffix, CompareCase::SENSITIVE)) {
has_libchrome_region = true;
break;
}
}
for (const base::debug::MappedMemoryRegion& region : regions) {
if (has_libchrome_region &&
- !EndsWith(region.path, kLibchromeSuffix, true)) {
+ !EndsWith(region.path, kLibchromeSuffix, CompareCase::SENSITIVE)) {
continue;
}
ranges->push_back(std::make_pair(region.start, region.end));
@@ -118,6 +118,12 @@ bool NativeLibraryPrefetcher::FindRanges(std::vector<AddressRange>* ranges) {
// static
bool NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary() {
+ // Avoid forking with cygprofile instrumentation because the latter performs
+ // memory allocations.
+#if defined(CYGPROFILE_INSTRUMENTATION)
+ return false;
+#endif
+
// Looking for ranges is done before the fork, to avoid syscalls and/or memory
// allocations in the forked process. The child process inherits the lock
// state of its parent thread. It cannot rely on being able to acquire any
@@ -126,6 +132,7 @@ bool NativeLibraryPrefetcher::ForkAndPrefetchNativeLibrary() {
std::vector<AddressRange> ranges;
if (!FindRanges(&ranges))
return false;
+
pid_t pid = fork();
if (pid == 0) {
setpriority(PRIO_PROCESS, 0, kBackgroundPriority);
diff --git a/chromium/base/android/linker/BUILD.gn b/chromium/base/android/linker/BUILD.gn
index 043bfc6b3be..3724b88fe1b 100644
--- a/chromium/base/android/linker/BUILD.gn
+++ b/chromium/base/android/linker/BUILD.gn
@@ -9,19 +9,20 @@ assert(is_android)
# GYP: //base/base.gyp:chromium_android_linker
shared_library("chromium_android_linker") {
sources = [
+ "android_dlext.h",
"legacy_linker_jni.cc",
+ "legacy_linker_jni.h",
+ "linker_jni.cc",
+ "linker_jni.h",
+ "modern_linker_jni.cc",
+ "modern_linker_jni.h",
]
# The NDK contains the crazy_linker here:
# '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
# However, we use our own fork. See bug 384700.
deps = [
+ "//build/config/sanitizers:deps",
"//third_party/android_crazy_linker",
]
-
- # TODO(GYP):
- # The crazy linker is never instrumented.
- #'cflags!': [
- #'-finstrument-functions',
- #],
}
diff --git a/chromium/base/android/linker/android_dlext.h b/chromium/base/android/linker/android_dlext.h
new file mode 100644
index 00000000000..edf5180c20a
--- /dev/null
+++ b/chromium/base/android/linker/android_dlext.h
@@ -0,0 +1,77 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Definitions for android_dlopen_ext().
+//
+// This function was added for Android L-MR1 and made available in android-21
+// but we currently build Chromium with android-16. Placing the declarations
+// we need here allows code that uses android_dlopen_ext() to build with
+// android-16. At runtime we check the target's SDK_INT to ensure that we
+// are on a system new enough to offer this function, and also only access
+// it with dlsym so that the runtime linker on pre-Android L-MR1 targets will
+// not complain about a missing symbol when loading our library.
+//
+// Details below taken from:
+// third_party/android_tools/ndk/platforms/android-21
+// /arch-arm/usr/include/android/dlext.h
+//
+// Although taken specifically from arch-arm, there are no architecture-
+// specific elements in dlext.h. All android-21/arch-* directories contain
+// identical copies of dlext.h.
+
+#ifndef BASE_ANDROID_LINKER_ANDROID_DLEXT_H_
+#define BASE_ANDROID_LINKER_ANDROID_DLEXT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* bitfield definitions for android_dlextinfo.flags */
+enum {
+ /* When set, the reserved_addr and reserved_size fields must point to an
+ * already-reserved region of address space which will be used to load the
+ * library if it fits. If the reserved region is not large enough, the load
+ * will fail.
+ */
+ ANDROID_DLEXT_RESERVED_ADDRESS = 0x1,
+
+ /* As DLEXT_RESERVED_ADDRESS, but if the reserved region is not large enough,
+ * the linker will choose an available address instead.
+ */
+ ANDROID_DLEXT_RESERVED_ADDRESS_HINT = 0x2,
+
+ /* When set, write the GNU RELRO section of the mapped library to relro_fd
+ * after relocation has been performed, to allow it to be reused by another
+ * process loading the same library at the same address. This implies
+ * ANDROID_DLEXT_USE_RELRO.
+ */
+ ANDROID_DLEXT_WRITE_RELRO = 0x4,
+
+ /* When set, compare the GNU RELRO section of the mapped library to relro_fd
+ * after relocation has been performed, and replace any relocated pages that
+ * are identical with a version mapped from the file.
+ */
+ ANDROID_DLEXT_USE_RELRO = 0x8,
+
+ /* Instruct dlopen to use library_fd instead of opening file by name.
+ * The filename parameter is still used to identify the library.
+ */
+ ANDROID_DLEXT_USE_LIBRARY_FD = 0x10,
+
+ /* Mask of valid bits */
+ ANDROID_DLEXT_VALID_FLAG_BITS = ANDROID_DLEXT_RESERVED_ADDRESS |
+ ANDROID_DLEXT_RESERVED_ADDRESS_HINT |
+ ANDROID_DLEXT_WRITE_RELRO |
+ ANDROID_DLEXT_USE_RELRO |
+ ANDROID_DLEXT_USE_LIBRARY_FD,
+};
+
+typedef struct {
+ uint64_t flags;
+ void* reserved_addr;
+ size_t reserved_size;
+ int relro_fd;
+ int library_fd;
+} android_dlextinfo;
+
+#endif // BASE_ANDROID_LINKER_ANDROID_DLEXT_H_
diff --git a/chromium/base/android/memory_pressure_listener_android.cc b/chromium/base/android/memory_pressure_listener_android.cc
index 80c07bcf9c1..9d3dd463df0 100644
--- a/chromium/base/android/memory_pressure_listener_android.cc
+++ b/chromium/base/android/memory_pressure_listener_android.cc
@@ -8,8 +8,9 @@
#include "jni/MemoryPressureListener_jni.h"
// Defined and called by JNI.
-static void OnMemoryPressure(
- JNIEnv* env, jclass clazz, jint memory_pressure_level) {
+static void OnMemoryPressure(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ jint memory_pressure_level) {
base::MemoryPressureListener::NotifyMemoryPressure(
static_cast<base::MemoryPressureListener::MemoryPressureLevel>(
memory_pressure_level));
diff --git a/chromium/base/android/path_service_android.cc b/chromium/base/android/path_service_android.cc
index 18ca70c8abd..9972bbb6ca3 100644
--- a/chromium/base/android/path_service_android.cc
+++ b/chromium/base/android/path_service_android.cc
@@ -13,7 +13,10 @@
namespace base {
namespace android {
-void Override(JNIEnv* env, jclass clazz, jint what, jstring path) {
+void Override(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ jint what,
+ const JavaParamRef<jstring>& path) {
FilePath file_path(ConvertJavaStringToUTF8(env, path));
PathService::Override(what, file_path);
}
diff --git a/chromium/base/android/path_utils_unittest.cc b/chromium/base/android/path_utils_unittest.cc
index c678ce21ec8..d991810aa91 100644
--- a/chromium/base/android/path_utils_unittest.cc
+++ b/chromium/base/android/path_utils_unittest.cc
@@ -5,6 +5,7 @@
#include "base/android/path_utils.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
+#include "base/strings/string_util.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -13,14 +14,27 @@ namespace android {
typedef testing::Test PathUtilsTest;
+namespace {
+void ExpectEither(const std::string& expected1,
+ const std::string& expected2,
+ const std::string& actual) {
+ EXPECT_TRUE(expected1 == actual || expected2 == actual)
+ << "Value of: " << actual << std::endl
+ << "Expected either: " << expected1 << std::endl
+ << "or: " << expected2;
+}
+} // namespace
+
TEST_F(PathUtilsTest, TestGetDataDirectory) {
// The string comes from the Java side and depends on the APK
// we are running in. Assumes that we are packaged in
// org.chromium.native_test
FilePath path;
GetDataDirectory(&path);
- EXPECT_STREQ("/data/data/org.chromium.native_test/app_chrome",
- path.value().c_str());
+
+ ExpectEither("/data/data/org.chromium.native_test/app_chrome",
+ "/data/user/0/org.chromium.native_test/app_chrome",
+ path.value());
}
TEST_F(PathUtilsTest, TestGetCacheDirectory) {
@@ -29,8 +43,9 @@ TEST_F(PathUtilsTest, TestGetCacheDirectory) {
// org.chromium.native_test
FilePath path;
GetCacheDirectory(&path);
- EXPECT_STREQ("/data/data/org.chromium.native_test/cache",
- path.value().c_str());
+ ExpectEither("/data/data/org.chromium.native_test/cache",
+ "/data/user/0/org.chromium.native_test/cache",
+ path.value());
}
TEST_F(PathUtilsTest, TestGetNativeLibraryDirectory) {
diff --git a/chromium/base/android/record_histogram.cc b/chromium/base/android/record_histogram.cc
index 9a68deca54f..51ca482e291 100644
--- a/chromium/base/android/record_histogram.cc
+++ b/chromium/base/android/record_histogram.cc
@@ -85,6 +85,30 @@ class HistogramCache {
return InsertLocked(j_histogram_key, histogram);
}
+ HistogramBase* LinearCountHistogram(JNIEnv* env,
+ jstring j_histogram_name,
+ jint j_histogram_key,
+ jint j_min,
+ jint j_max,
+ jint j_num_buckets) {
+ DCHECK(j_histogram_name);
+ DCHECK(j_histogram_key);
+ int64 min = static_cast<int64>(j_min);
+ int64 max = static_cast<int64>(j_max);
+ int num_buckets = static_cast<int>(j_num_buckets);
+ HistogramBase* histogram = FindLocked(j_histogram_key);
+ if (histogram) {
+ DCHECK(histogram->HasConstructionArguments(min, max, num_buckets));
+ return histogram;
+ }
+
+ std::string histogram_name = ConvertJavaStringToUTF8(env, j_histogram_name);
+ histogram =
+ LinearHistogram::FactoryGet(histogram_name, min, max, num_buckets,
+ HistogramBase::kUmaTargetedHistogramFlag);
+ return InsertLocked(j_histogram_key, histogram);
+ }
+
HistogramBase* SparseHistogram(JNIEnv* env,
jstring j_histogram_name,
jint j_histogram_key) {
@@ -150,8 +174,8 @@ base::LazyInstance<HistogramCache>::Leaky g_histograms;
} // namespace
void RecordBooleanHistogram(JNIEnv* env,
- jclass clazz,
- jstring j_histogram_name,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_histogram_name,
jint j_histogram_key,
jboolean j_sample) {
bool sample = static_cast<bool>(j_sample);
@@ -161,8 +185,8 @@ void RecordBooleanHistogram(JNIEnv* env,
}
void RecordEnumeratedHistogram(JNIEnv* env,
- jclass clazz,
- jstring j_histogram_name,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_histogram_name,
jint j_histogram_key,
jint j_sample,
jint j_boundary) {
@@ -174,8 +198,8 @@ void RecordEnumeratedHistogram(JNIEnv* env,
}
void RecordCustomCountHistogram(JNIEnv* env,
- jclass clazz,
- jstring j_histogram_name,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_histogram_name,
jint j_histogram_key,
jint j_sample,
jint j_min,
@@ -189,32 +213,49 @@ void RecordCustomCountHistogram(JNIEnv* env,
->Add(sample);
}
+void RecordLinearCountHistogram(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_histogram_name,
+ jint j_histogram_key,
+ jint j_sample,
+ jint j_min,
+ jint j_max,
+ jint j_num_buckets) {
+ int sample = static_cast<int>(j_sample);
+
+ g_histograms.Get()
+ .LinearCountHistogram(env, j_histogram_name, j_histogram_key, j_min,
+ j_max, j_num_buckets)
+ ->Add(sample);
+}
+
void RecordSparseHistogram(JNIEnv* env,
- jclass clazz,
- jstring j_histogram_name,
- jint j_histogram_key,
- jint j_sample) {
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_histogram_name,
+ jint j_histogram_key,
+ jint j_sample) {
int sample = static_cast<int>(j_sample);
g_histograms.Get()
.SparseHistogram(env, j_histogram_name, j_histogram_key)
->Add(sample);
}
-void RecordCustomTimesHistogramMilliseconds(JNIEnv* env,
- jclass clazz,
- jstring j_histogram_name,
- jint j_histogram_key,
- jlong j_duration,
- jlong j_min,
- jlong j_max,
- jint j_num_buckets) {
+void RecordCustomTimesHistogramMilliseconds(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_histogram_name,
+ jint j_histogram_key,
+ jlong j_duration,
+ jlong j_min,
+ jlong j_max,
+ jint j_num_buckets) {
g_histograms.Get()
.CustomTimesHistogram(env, j_histogram_name, j_histogram_key, j_min,
j_max, j_num_buckets)
->AddTime(TimeDelta::FromMilliseconds(static_cast<int64>(j_duration)));
}
-void Initialize(JNIEnv* env, jclass) {
+void Initialize(JNIEnv* env, const JavaParamRef<jclass>&) {
StatisticsRecorder::Initialize();
}
@@ -222,10 +263,11 @@ void Initialize(JNIEnv* env, jclass) {
// MetricsUtils.HistogramDelta. It should live in a test-specific file, but we
// currently can't have test-specific native code packaged in test-specific Java
// targets - see http://crbug.com/415945.
-jint GetHistogramValueCountForTesting(JNIEnv* env,
- jclass clazz,
- jstring histogram_name,
- jint sample) {
+jint GetHistogramValueCountForTesting(
+ JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& histogram_name,
+ jint sample) {
HistogramBase* histogram = StatisticsRecorder::FindHistogram(
android::ConvertJavaStringToUTF8(env, histogram_name));
if (histogram == nullptr) {
diff --git a/chromium/base/android/record_user_action.cc b/chromium/base/android/record_user_action.cc
index 6172f2e5667..1452341d823 100644
--- a/chromium/base/android/record_user_action.cc
+++ b/chromium/base/android/record_user_action.cc
@@ -11,7 +11,9 @@
namespace base {
namespace android {
-static void RecordUserAction(JNIEnv* env, jclass clazz, jstring j_action) {
+static void RecordUserAction(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& j_action) {
RecordComputedAction(ConvertJavaStringToUTF8(env, j_action));
}
diff --git a/chromium/base/android/scoped_java_ref.cc b/chromium/base/android/scoped_java_ref.cc
index bb6f5032fe3..4d4ef6da191 100644
--- a/chromium/base/android/scoped_java_ref.cc
+++ b/chromium/base/android/scoped_java_ref.cc
@@ -28,16 +28,15 @@ ScopedJavaLocalFrame::ScopedJavaLocalFrame(JNIEnv* env, int capacity)
ScopedJavaLocalFrame::~ScopedJavaLocalFrame() { env_->PopLocalFrame(NULL); }
-JavaRef<jobject>::JavaRef() : obj_(NULL) {}
-
+#if DCHECK_IS_ON()
+// This constructor is inlined when DCHECKs are disabled; don't add anything
+// else here.
JavaRef<jobject>::JavaRef(JNIEnv* env, jobject obj) : obj_(obj) {
if (obj) {
DCHECK(env && env->GetObjectRefType(obj) == JNILocalRefType);
}
}
-
-JavaRef<jobject>::~JavaRef() {
-}
+#endif
JNIEnv* JavaRef<jobject>::SetNewLocalRef(JNIEnv* env, jobject obj) {
if (!env) {
diff --git a/chromium/base/android/scoped_java_ref.h b/chromium/base/android/scoped_java_ref.h
index 8047ee8c167..cad63b75617 100644
--- a/chromium/base/android/scoped_java_ref.h
+++ b/chromium/base/android/scoped_java_ref.h
@@ -10,6 +10,8 @@
#include "base/base_export.h"
#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/template_util.h"
namespace base {
namespace android {
@@ -45,14 +47,21 @@ class BASE_EXPORT JavaRef<jobject> {
bool is_null() const { return obj_ == NULL; }
protected:
- // Initializes a NULL reference.
- JavaRef();
+ // Initializes a NULL reference. Don't add anything else here; it's inlined.
+ JavaRef() : obj_(NULL) {}
// Takes ownership of the |obj| reference passed; requires it to be a local
// reference type.
+#if DCHECK_IS_ON()
+ // Implementation contains a DCHECK; implement out-of-line when DCHECK_IS_ON.
JavaRef(JNIEnv* env, jobject obj);
+#else
+ // Don't add anything else here; it's inlined.
+ JavaRef(JNIEnv* env, jobject obj) : obj_(obj) {}
+#endif
- ~JavaRef();
+ // Don't add anything else here; it's inlined.
+ ~JavaRef() {}
// The following are implementation detail convenience methods, for
// use by the sub-classes.
@@ -86,6 +95,27 @@ class JavaRef : public JavaRef<jobject> {
DISALLOW_COPY_AND_ASSIGN(JavaRef);
};
+// Holds a local reference to a JNI method parameter.
+// Method parameters should not be deleted, and so this class exists purely to
+// wrap them as a JavaRef<T> in the JNI binding generator. Do not create
+// instances manually.
+template<typename T>
+class JavaParamRef : public JavaRef<T> {
+ public:
+ // Assumes that |obj| is a parameter passed to a JNI method from Java.
+ // Does not assume ownership as parameters should not be deleted.
+ JavaParamRef(JNIEnv* env, T obj) : JavaRef<T>(env, obj) {}
+
+ ~JavaParamRef() {}
+
+ // TODO(torne): remove this cast once we're using JavaRef consistently.
+ // http://crbug.com/506850
+ operator T() const { return JavaRef<T>::obj(); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(JavaParamRef);
+};
+
// Holds a local reference to a Java object. The local reference is scoped
// to the lifetime of this object.
// Instances of this class may hold onto any JNIEnv passed into it until
@@ -149,12 +179,14 @@ class ScopedJavaLocalRef : public JavaRef<T> {
template<typename U>
void Reset(JNIEnv* env, U obj) {
- implicit_cast<T>(obj); // Ensure U is assignable to T
+ static_assert(base::is_convertible<U, T>::value,
+ "U must be convertible to T");
env_ = this->SetNewLocalRef(env, obj);
}
// Releases the local reference to the caller. The caller *must* delete the
- // local reference when it is done with it.
+ // local reference when it is done with it. Note that calling a Java method
+ // is *not* a transfer of ownership and Release() should not be used.
T Release() {
return static_cast<T>(this->ReleaseInternal());
}
@@ -163,6 +195,13 @@ class ScopedJavaLocalRef : public JavaRef<T> {
// This class is only good for use on the thread it was created on so
// it's safe to cache the non-threadsafe JNIEnv* inside this object.
JNIEnv* env_;
+
+ // Prevent ScopedJavaLocalRef(JNIEnv*, T obj) from being used to take
+ // ownership of a JavaParamRef's underlying object - parameters are not
+ // allowed to be deleted and so should not be owned by ScopedJavaLocalRef.
+ // TODO(torne): this can be removed once JavaParamRef no longer has an
+ // implicit conversion back to T.
+ ScopedJavaLocalRef(JNIEnv* env, const JavaParamRef<T>& other);
};
// Holds a global reference to a Java object. The global reference is scoped
@@ -199,13 +238,20 @@ class ScopedJavaGlobalRef : public JavaRef<T> {
}
template<typename U>
+ void Reset(JNIEnv* env, const JavaParamRef<U>& other) {
+ this->Reset(env, other.obj());
+ }
+
+ template<typename U>
void Reset(JNIEnv* env, U obj) {
- implicit_cast<T>(obj); // Ensure U is assignable to T
+ static_assert(base::is_convertible<U, T>::value,
+ "U must be convertible to T");
this->SetNewGlobalRef(env, obj);
}
// Releases the global reference to the caller. The caller *must* delete the
- // global reference when it is done with it.
+ // global reference when it is done with it. Note that calling a Java method
+ // is *not* a transfer of ownership and Release() should not be used.
T Release() {
return static_cast<T>(this->ReleaseInternal());
}
diff --git a/chromium/base/android/trace_event_binding.cc b/chromium/base/android/trace_event_binding.cc
index 791b67fcf23..f761a646027 100644
--- a/chromium/base/android/trace_event_binding.cc
+++ b/chromium/base/android/trace_event_binding.cc
@@ -8,6 +8,7 @@
#include <set>
+#include "base/android/jni_string.h"
#include "base/lazy_instance.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
@@ -25,32 +26,28 @@ const char kLooperDispatchMessage[] = "Looper.dispatchMessage";
// Boilerplate for safely converting Java data to TRACE_EVENT data.
class TraceEventDataConverter {
public:
- TraceEventDataConverter(JNIEnv* env,
- jstring jname,
- jstring jarg)
+ TraceEventDataConverter(JNIEnv* env, jstring jname, jstring jarg)
: env_(env),
jname_(jname),
jarg_(jarg),
- name_(env->GetStringUTFChars(jname, NULL)),
- arg_(jarg ? env->GetStringUTFChars(jarg, NULL) : NULL) {
- }
+ name_(ConvertJavaStringToUTF8(env, jname)),
+ has_arg_(jarg != nullptr),
+ arg_(jarg ? ConvertJavaStringToUTF8(env, jarg) : "") {}
~TraceEventDataConverter() {
- env_->ReleaseStringUTFChars(jname_, name_);
- if (jarg_)
- env_->ReleaseStringUTFChars(jarg_, arg_);
}
// Return saves values to pass to TRACE_EVENT macros.
- const char* name() { return name_; }
- const char* arg_name() { return arg_ ? "arg" : NULL; }
- const char* arg() { return arg_; }
+ const char* name() { return name_.c_str(); }
+ const char* arg_name() { return has_arg_ ? "arg" : nullptr; }
+ const char* arg() { return has_arg_ ? arg_.c_str() : nullptr; }
private:
JNIEnv* env_;
jstring jname_;
jstring jarg_;
- const char* name_;
- const char* arg_;
+ std::string name_;
+ bool has_arg_;
+ std::string arg_;
DISALLOW_COPY_AND_ASSIGN(TraceEventDataConverter);
};
@@ -72,23 +69,26 @@ base::LazyInstance<TraceEnabledObserver>::Leaky g_trace_enabled_state_observer_;
} // namespace
-static void RegisterEnabledObserver(JNIEnv* env, jclass clazz) {
+static void RegisterEnabledObserver(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz) {
bool enabled = trace_event::TraceLog::GetInstance()->IsEnabled();
base::android::Java_TraceEvent_setEnabled(env, enabled);
trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(
g_trace_enabled_state_observer_.Pointer());
}
-static void StartATrace(JNIEnv* env, jclass clazz) {
+static void StartATrace(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
base::trace_event::TraceLog::GetInstance()->StartATrace();
}
-static void StopATrace(JNIEnv* env, jclass clazz) {
+static void StopATrace(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
base::trace_event::TraceLog::GetInstance()->StopATrace();
}
-static void Instant(JNIEnv* env, jclass clazz,
- jstring jname, jstring jarg) {
+static void Instant(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jname,
+ const JavaParamRef<jstring>& jarg) {
TraceEventDataConverter converter(env, jname, jarg);
if (converter.arg()) {
TRACE_EVENT_COPY_INSTANT1(kJavaCategory, converter.name(),
@@ -100,8 +100,10 @@ static void Instant(JNIEnv* env, jclass clazz,
}
}
-static void Begin(JNIEnv* env, jclass clazz,
- jstring jname, jstring jarg) {
+static void Begin(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jname,
+ const JavaParamRef<jstring>& jarg) {
TraceEventDataConverter converter(env, jname, jarg);
if (converter.arg()) {
TRACE_EVENT_COPY_BEGIN1(kJavaCategory, converter.name(),
@@ -111,8 +113,10 @@ static void Begin(JNIEnv* env, jclass clazz,
}
}
-static void End(JNIEnv* env, jclass clazz,
- jstring jname, jstring jarg) {
+static void End(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jname,
+ const JavaParamRef<jstring>& jarg) {
TraceEventDataConverter converter(env, jname, jarg);
if (converter.arg()) {
TRACE_EVENT_COPY_END1(kJavaCategory, converter.name(),
@@ -122,20 +126,26 @@ static void End(JNIEnv* env, jclass clazz,
}
}
-static void BeginToplevel(JNIEnv* env, jclass clazz) {
+static void BeginToplevel(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
TRACE_EVENT_BEGIN0(kToplevelCategory, kLooperDispatchMessage);
}
-static void EndToplevel(JNIEnv* env, jclass clazz) {
+static void EndToplevel(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
TRACE_EVENT_END0(kToplevelCategory, kLooperDispatchMessage);
}
-static void StartAsync(JNIEnv* env, jclass clazz, jstring jname, jlong jid) {
+static void StartAsync(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jname,
+ jlong jid) {
TraceEventDataConverter converter(env, jname, nullptr);
TRACE_EVENT_COPY_ASYNC_BEGIN0(kJavaCategory, converter.name(), jid);
}
-static void FinishAsync(JNIEnv* env, jclass clazz, jstring jname, jlong jid) {
+static void FinishAsync(JNIEnv* env,
+ const JavaParamRef<jclass>& clazz,
+ const JavaParamRef<jstring>& jname,
+ jlong jid) {
TraceEventDataConverter converter(env, jname, nullptr);
TRACE_EVENT_COPY_ASYNC_END0(kJavaCategory, converter.name(), jid);
}
diff --git a/chromium/base/async_socket_io_handler.h b/chromium/base/async_socket_io_handler.h
deleted file mode 100644
index a22c29d907d..00000000000
--- a/chromium/base/async_socket_io_handler.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ASYNC_SOCKET_IO_HANDLER_H_
-#define BASE_ASYNC_SOCKET_IO_HANDLER_H_
-
-#include "base/message_loop/message_loop.h"
-#include "base/sync_socket.h"
-#include "base/threading/non_thread_safe.h"
-
-namespace base {
-
-// Extends the CancelableSyncSocket class to allow reading from a socket
-// asynchronously on a TYPE_IO message loop thread. This makes it easy to share
-// a thread that uses a message loop (e.g. for IPC and other things) and not
-// require a separate thread to read from the socket.
-//
-// Example usage (also see the unit tests):
-//
-// class SocketReader {
-// public:
-// SocketReader(base::CancelableSyncSocket* socket)
-// : socket_(socket), buffer_() {
-// io_handler.Initialize(socket_->handle(),
-// base::Bind(&SocketReader::OnDataAvailable,
-// base::Unretained(this));
-// }
-//
-// void AsyncRead() {
-// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
-// }
-//
-// private:
-// void OnDataAvailable(int bytes_read) {
-// if (ProcessData(&buffer_[0], bytes_read)) {
-// // Issue another read.
-// CHECK(io_handler.Read(&buffer_[0], sizeof(buffer_)));
-// }
-// }
-//
-// base::AsyncSocketIoHandler io_handler;
-// base::CancelableSyncSocket* socket_;
-// char buffer_[kBufferSize];
-// };
-//
-class BASE_EXPORT AsyncSocketIoHandler
- : public NON_EXPORTED_BASE(base::NonThreadSafe),
-// The message loop callback interface is different based on platforms.
-#if defined(OS_WIN)
- public NON_EXPORTED_BASE(base::MessageLoopForIO::IOHandler) {
-#else
- public NON_EXPORTED_BASE(base::MessageLoopForIO::Watcher) {
-#endif
- public:
- AsyncSocketIoHandler();
- ~AsyncSocketIoHandler() override;
-
- // Type definition for the callback. The parameter tells how many
- // bytes were read and is 0 if an error occurred.
- typedef base::Callback<void(int)> ReadCompleteCallback;
-
- // Initializes the AsyncSocketIoHandler by hooking it up to the current
- // thread's message loop (must be TYPE_IO), to do async reads from the socket
- // on the current thread. The |callback| will be invoked whenever a Read()
- // has completed.
- bool Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback);
-
- // Attempts to read from the socket. The return value will be |false|
- // if an error occurred and |true| if data was read or a pending read
- // was issued. Regardless of async or sync operation, the
- // ReadCompleteCallback (see above) will be called when data is available.
- bool Read(char* buffer, int buffer_len);
-
- private:
-#if defined(OS_WIN)
- // Implementation of IOHandler on Windows.
- void OnIOCompleted(base::MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) override;
-#elif defined(OS_POSIX)
- // Implementation of base::MessageLoopForIO::Watcher.
- void OnFileCanWriteWithoutBlocking(int socket) override {}
- void OnFileCanReadWithoutBlocking(int socket) override;
-
- void EnsureWatchingSocket();
-#endif
-
- base::SyncSocket::Handle socket_;
-#if defined(OS_WIN)
- base::MessageLoopForIO::IOContext* context_;
- bool is_pending_;
-#elif defined(OS_POSIX)
- base::MessageLoopForIO::FileDescriptorWatcher socket_watcher_;
- // |pending_buffer_| and |pending_buffer_len_| are valid only between
- // Read() and OnFileCanReadWithoutBlocking().
- char* pending_buffer_;
- int pending_buffer_len_;
- // |true| iff the message loop is watching the socket for IO events.
- bool is_watching_;
-#endif
- ReadCompleteCallback read_complete_;
-
- DISALLOW_COPY_AND_ASSIGN(AsyncSocketIoHandler);
-};
-
-} // namespace base.
-
-#endif // BASE_ASYNC_SOCKET_IO_HANDLER_H_
diff --git a/chromium/base/async_socket_io_handler_posix.cc b/chromium/base/async_socket_io_handler_posix.cc
deleted file mode 100644
index 2fffb844ff8..00000000000
--- a/chromium/base/async_socket_io_handler_posix.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/async_socket_io_handler.h"
-
-#include <fcntl.h>
-
-#include "base/posix/eintr_wrapper.h"
-
-namespace base {
-
-AsyncSocketIoHandler::AsyncSocketIoHandler()
- : socket_(base::SyncSocket::kInvalidHandle),
- pending_buffer_(NULL),
- pending_buffer_len_(0),
- is_watching_(false) {
-}
-
-AsyncSocketIoHandler::~AsyncSocketIoHandler() {
- DCHECK(CalledOnValidThread());
-}
-
-void AsyncSocketIoHandler::OnFileCanReadWithoutBlocking(int socket) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(socket, socket_);
- DCHECK(!read_complete_.is_null());
-
- if (pending_buffer_) {
- int bytes_read = HANDLE_EINTR(read(socket_, pending_buffer_,
- pending_buffer_len_));
- DCHECK_GE(bytes_read, 0);
- pending_buffer_ = NULL;
- pending_buffer_len_ = 0;
- read_complete_.Run(bytes_read > 0 ? bytes_read : 0);
- } else {
- // We're getting notifications that we can read from the socket while
- // we're not waiting for data. In order to not starve the message loop,
- // let's stop watching the fd and restart the watch when Read() is called.
- is_watching_ = false;
- socket_watcher_.StopWatchingFileDescriptor();
- }
-}
-
-bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
- DCHECK(CalledOnValidThread());
- DCHECK(!read_complete_.is_null());
- DCHECK(!pending_buffer_);
-
- EnsureWatchingSocket();
-
- int bytes_read = HANDLE_EINTR(read(socket_, buffer, buffer_len));
- if (bytes_read < 0) {
- if (errno == EAGAIN) {
- pending_buffer_ = buffer;
- pending_buffer_len_ = buffer_len;
- } else {
- NOTREACHED() << "read(): " << errno;
- return false;
- }
- } else {
- read_complete_.Run(bytes_read);
- }
- return true;
-}
-
-bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback) {
- DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
-
- DetachFromThread();
-
- socket_ = socket;
- read_complete_ = callback;
-
- // SyncSocket is blocking by default, so let's convert it to non-blocking.
- int value = fcntl(socket, F_GETFL);
- if (!(value & O_NONBLOCK)) {
- // Set the socket to be non-blocking so we can do async reads.
- if (fcntl(socket, F_SETFL, O_NONBLOCK) == -1) {
- NOTREACHED();
- return false;
- }
- }
-
- return true;
-}
-
-void AsyncSocketIoHandler::EnsureWatchingSocket() {
- DCHECK(CalledOnValidThread());
- if (!is_watching_ && socket_ != base::SyncSocket::kInvalidHandle) {
- is_watching_ = base::MessageLoopForIO::current()->WatchFileDescriptor(
- socket_, true, base::MessageLoopForIO::WATCH_READ,
- &socket_watcher_, this);
- }
-}
-
-} // namespace base.
diff --git a/chromium/base/async_socket_io_handler_unittest.cc b/chromium/base/async_socket_io_handler_unittest.cc
deleted file mode 100644
index 721de9cd72d..00000000000
--- a/chromium/base/async_socket_io_handler_unittest.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/async_socket_io_handler.h"
-
-#include "base/bind.h"
-#include "base/location.h"
-#include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-const char kAsyncSocketIoTestString[] = "Hello, AsyncSocketIoHandler";
-const size_t kAsyncSocketIoTestStringLength =
- arraysize(kAsyncSocketIoTestString);
-
-class TestSocketReader {
- public:
- // Set |number_of_reads_before_quit| to >0 when you expect a specific number
- // of Read operations to complete. Once that number is reached, the current
- // message loop will be Quit(). Set |number_of_reads_before_quit| to -1 if
- // callbacks should not be counted.
- TestSocketReader(base::CancelableSyncSocket* socket,
- int number_of_reads_before_quit,
- bool issue_reads_from_callback,
- bool expect_eof)
- : socket_(socket), buffer_(),
- number_of_reads_before_quit_(number_of_reads_before_quit),
- callbacks_received_(0),
- issue_reads_from_callback_(issue_reads_from_callback),
- expect_eof_(expect_eof) {
- io_handler.Initialize(socket_->handle(),
- base::Bind(&TestSocketReader::OnRead,
- base::Unretained(this)));
- }
- ~TestSocketReader() {}
-
- bool IssueRead() {
- return io_handler.Read(&buffer_[0], sizeof(buffer_));
- }
-
- const char* buffer() const { return &buffer_[0]; }
-
- int callbacks_received() const { return callbacks_received_; }
-
- private:
- void OnRead(int bytes_read) {
- if (!expect_eof_) {
- EXPECT_GT(bytes_read, 0);
- } else {
- EXPECT_GE(bytes_read, 0);
- }
- ++callbacks_received_;
- if (number_of_reads_before_quit_ == callbacks_received_) {
- base::MessageLoop::current()->Quit();
- } else if (issue_reads_from_callback_) {
- IssueRead();
- }
- }
-
- base::AsyncSocketIoHandler io_handler;
- base::CancelableSyncSocket* socket_; // Ownership lies outside the class.
- char buffer_[kAsyncSocketIoTestStringLength];
- int number_of_reads_before_quit_;
- int callbacks_received_;
- bool issue_reads_from_callback_;
- bool expect_eof_;
-};
-
-// Workaround to be able to use a base::Closure for sending data.
-// Send() returns int but a closure must return void.
-void SendData(base::CancelableSyncSocket* socket,
- const void* buffer,
- size_t length) {
- socket->Send(buffer, length);
-}
-
-} // end namespace.
-
-// Tests doing a pending read from a socket and use an IO handler to get
-// notified of data.
-TEST(AsyncSocketIoHandlerTest, AsynchronousReadWithMessageLoop) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- TestSocketReader reader(&pair[0], 1, false, false);
- EXPECT_TRUE(reader.IssueRead());
-
- pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
- base::MessageLoop::current()->Run();
- EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
- EXPECT_EQ(1, reader.callbacks_received());
-}
-
-// Tests doing a read from a socket when we know that there is data in the
-// socket. Here we want to make sure that any async 'can read' notifications
-// won't trip us off and that the synchronous case works as well.
-TEST(AsyncSocketIoHandlerTest, SynchronousReadWithMessageLoop) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- TestSocketReader reader(&pair[0], -1, false, false);
-
- pair[1].Send(kAsyncSocketIoTestString, kAsyncSocketIoTestStringLength);
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(100));
- base::MessageLoop::current()->Run();
-
- EXPECT_TRUE(reader.IssueRead());
- EXPECT_EQ(strcmp(reader.buffer(), kAsyncSocketIoTestString), 0);
- // We've now verified that the read happened synchronously, but it's not
- // guaranteed that the callback has been issued since the callback will be
- // called asynchronously even though the read may have been done.
- // So we call RunUntilIdle() to allow any event notifications or APC's on
- // Windows, to execute before checking the count of how many callbacks we've
- // received.
- base::MessageLoop::current()->RunUntilIdle();
- EXPECT_EQ(1, reader.callbacks_received());
-}
-
-// Calls Read() from within a callback to test that simple read "loops" work.
-TEST(AsyncSocketIoHandlerTest, ReadFromCallback) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- const int kReadOperationCount = 10;
- TestSocketReader reader(&pair[0], kReadOperationCount, true, false);
- EXPECT_TRUE(reader.IssueRead());
-
- // Issue sends on an interval to satisfy the Read() requirements.
- int64 milliseconds = 0;
- for (int i = 0; i < kReadOperationCount; ++i) {
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, base::Bind(&SendData, &pair[1], kAsyncSocketIoTestString,
- kAsyncSocketIoTestStringLength),
- base::TimeDelta::FromMilliseconds(milliseconds));
- milliseconds += 10;
- }
-
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(100 + milliseconds));
-
- base::MessageLoop::current()->Run();
- EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
-}
-
-// Calls Read() then close other end, check that a correct callback is received.
-TEST(AsyncSocketIoHandlerTest, ReadThenClose) {
- base::MessageLoopForIO loop;
-
- base::CancelableSyncSocket pair[2];
- ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&pair[0], &pair[1]));
-
- const int kReadOperationCount = 1;
- TestSocketReader reader(&pair[0], kReadOperationCount, false, true);
- EXPECT_TRUE(reader.IssueRead());
-
- pair[1].Close();
-
- base::MessageLoop::current()->Run();
- EXPECT_EQ(kReadOperationCount, reader.callbacks_received());
-}
diff --git a/chromium/base/async_socket_io_handler_win.cc b/chromium/base/async_socket_io_handler_win.cc
deleted file mode 100644
index e1d215c980a..00000000000
--- a/chromium/base/async_socket_io_handler_win.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/async_socket_io_handler.h"
-
-namespace base {
-
-AsyncSocketIoHandler::AsyncSocketIoHandler()
- : socket_(base::SyncSocket::kInvalidHandle),
- context_(NULL),
- is_pending_(false) {}
-
-AsyncSocketIoHandler::~AsyncSocketIoHandler() {
- // We need to be deleted on the correct thread to avoid racing with the
- // message loop thread.
- DCHECK(CalledOnValidThread());
-
- if (context_) {
- if (is_pending_) {
- // Make the context be deleted by the message pump when done.
- context_->handler = NULL;
- } else {
- delete context_;
- }
- }
-}
-
-// Implementation of IOHandler on Windows.
-void AsyncSocketIoHandler::OnIOCompleted(
- base::MessageLoopForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) {
- DCHECK(CalledOnValidThread());
- DCHECK_EQ(context_, context);
- DCHECK(!read_complete_.is_null());
- is_pending_ = false;
- read_complete_.Run(error == ERROR_SUCCESS ? bytes_transfered : 0);
-}
-
-bool AsyncSocketIoHandler::Read(char* buffer, int buffer_len) {
- DCHECK(CalledOnValidThread());
- DCHECK(!read_complete_.is_null());
- DCHECK(!is_pending_);
- DCHECK_NE(socket_, base::SyncSocket::kInvalidHandle);
-
- DWORD bytes_read = 0;
- BOOL ok = ::ReadFile(socket_, buffer, buffer_len, &bytes_read,
- &context_->overlapped);
- // The completion port will be signaled regardless of completing the read
- // straight away or asynchronously (ERROR_IO_PENDING). OnIOCompleted() will
- // be called regardless and we don't need to explicitly run the callback
- // in the case where ok is FALSE and GLE==ERROR_IO_PENDING.
- is_pending_ = !ok && (GetLastError() == ERROR_IO_PENDING);
- return ok || is_pending_;
-}
-
-bool AsyncSocketIoHandler::Initialize(base::SyncSocket::Handle socket,
- const ReadCompleteCallback& callback) {
- DCHECK(!context_);
- DCHECK_EQ(socket_, base::SyncSocket::kInvalidHandle);
-
- DetachFromThread();
-
- socket_ = socket;
- read_complete_ = callback;
-
- base::MessageLoopForIO::current()->RegisterIOHandler(socket, this);
-
- context_ = new base::MessageLoopForIO::IOContext();
- context_->handler = this;
- memset(&context_->overlapped, 0, sizeof(context_->overlapped));
-
- return true;
-}
-
-} // namespace base.
diff --git a/chromium/base/base.gyp b/chromium/base/base.gyp
index 85367274858..4b11485e0a5 100644
--- a/chromium/base/base.gyp
+++ b/chromium/base/base.gyp
@@ -218,11 +218,14 @@
}],
],
}],
+ ['OS=="ios"', {
+ 'sources!': [
+ 'sync_socket.h',
+ 'sync_socket_posix.cc',
+ ]
+ }],
],
'sources': [
- 'async_socket_io_handler.h',
- 'async_socket_io_handler_posix.cc',
- 'async_socket_io_handler_win.cc',
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
@@ -288,6 +291,8 @@
],
'export_dependent_settings': [
'base',
+ '../third_party/icu/icu.gyp:icuuc',
+ '../third_party/icu/icu.gyp:icui18n',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
@@ -430,7 +435,6 @@
'android/path_utils_unittest.cc',
'android/scoped_java_ref_unittest.cc',
'android/sys_utils_unittest.cc',
- 'async_socket_io_handler_unittest.cc',
'at_exit_unittest.cc',
'atomicops_unittest.cc',
'barrier_closure_unittest.cc',
@@ -464,6 +468,7 @@
'debug/task_annotator_unittest.cc',
'deferred_sequenced_task_runner_unittest.cc',
'environment_unittest.cc',
+ 'feature_list_unittest.cc',
'file_version_info_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'files/file_path_unittest.cc',
@@ -483,6 +488,7 @@
'i18n/char_iterator_unittest.cc',
'i18n/file_util_icu_unittest.cc',
'i18n/icu_string_conversions_unittest.cc',
+ 'i18n/message_formatter_unittest.cc',
'i18n/number_formatting_unittest.cc',
'i18n/rtl_unittest.cc',
'i18n/streaming_utf8_validator_unittest.cc',
@@ -515,6 +521,7 @@
'memory/aligned_memory_unittest.cc',
'memory/discardable_shared_memory_unittest.cc',
'memory/linked_ptr_unittest.cc',
+ 'memory/memory_pressure_listener_unittest.cc',
'memory/memory_pressure_monitor_chromeos_unittest.cc',
'memory/memory_pressure_monitor_mac_unittest.cc',
'memory/memory_pressure_monitor_win_unittest.cc',
@@ -524,6 +531,7 @@
'memory/scoped_ptr_unittest.nc',
'memory/scoped_vector_unittest.cc',
'memory/shared_memory_unittest.cc',
+ 'memory/shared_memory_mac_unittest.cc',
'memory/singleton_unittest.cc',
'memory/weak_ptr_unittest.cc',
'memory/weak_ptr_unittest.nc',
@@ -607,8 +615,6 @@
'task/cancelable_task_tracker_unittest.cc',
'task_runner_util_unittest.cc',
'template_util_unittest.cc',
- 'test/expectations/expectation_unittest.cc',
- 'test/expectations/parser_unittest.cc',
'test/histogram_tester_unittest.cc',
'test/test_pending_task_unittest.cc',
'test/test_reg_util_win_unittest.cc',
@@ -652,6 +658,7 @@
'win/registry_unittest.cc',
'win/scoped_bstr_unittest.cc',
'win/scoped_comptr_unittest.cc',
+ 'win/scoped_handle_unittest.cc',
'win/scoped_process_information_unittest.cc',
'win/scoped_variant_unittest.cc',
'win/shortcut_unittest.cc',
@@ -689,6 +696,8 @@
}],
['OS == "ios" and _toolset != "host"', {
'sources/': [
+ # iOS does not support FilePathWatcher.
+ ['exclude', '^files/file_path_watcher_unittest\\.cc$'],
# Only test the iOS-meaningful portion of memory and process_utils.
['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
['exclude', '^memory/shared_memory_unittest\\.cc$'],
@@ -753,6 +762,11 @@
}],
]},
],
+ [ 'OS == "win" and target_arch == "x64"', {
+ 'sources': [
+ 'profiler/win32_stack_frame_unwinder_unittest.cc',
+ ],
+ }],
['OS == "win"', {
'sources!': [
'file_descriptor_shuffle_unittest.cc',
@@ -811,6 +825,12 @@
['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
'defines': ['SYSTEM_NATIVE_UTF8'],
}],
+ # SyncSocket isn't used on iOS
+ ['OS=="ios"', {
+ 'sources!': [
+ 'sync_socket_unittest.cc',
+ ],
+ }],
], # target_conditions
},
{
@@ -891,10 +911,6 @@
}],
],
'sources': [
- 'test/expectations/expectation.cc',
- 'test/expectations/expectation.h',
- 'test/expectations/parser.cc',
- 'test/expectations/parser.h',
'test/gtest_util.cc',
'test/gtest_util.h',
'test/gtest_xml_unittest_result_printer.cc',
@@ -903,6 +919,8 @@
'test/gtest_xml_util.h',
'test/histogram_tester.cc',
'test/histogram_tester.h',
+ 'test/icu_test_util.cc',
+ 'test/icu_test_util.h',
'test/ios/wait_util.h',
'test/ios/wait_util.mm',
'test/launcher/test_launcher.cc',
@@ -1176,9 +1194,6 @@
4267,
],
'sources': [
- 'async_socket_io_handler.h',
- 'async_socket_io_handler_posix.cc',
- 'async_socket_io_handler_win.cc',
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
@@ -1437,7 +1452,7 @@
'target_name': 'base_java',
'type': 'none',
'variables': {
- 'java_in_dir': '../base/android/java',
+ 'java_in_dir': 'android/java',
'jar_excluded_classes': [ '*/NativeLibraries.class' ],
},
'dependencies': [
@@ -1446,6 +1461,7 @@
'base_java_library_process_type',
'base_java_memory_pressure_level',
'base_native_libraries_gen',
+ '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
'../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
],
'includes': [ '../build/java.gypi' ],
@@ -1503,12 +1519,30 @@
'includes': [ '../build/java.gypi' ],
},
{
+ # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
+ # in the multidex shadow library. crbug.com/522043
+ # GN: //base:base_junit_test_support
+ 'target_name': 'base_junit_test_support',
+ 'type': 'none',
+ 'dependencies': [
+ '../testing/android/junit/junit_test.gyp:junit_test_support',
+ '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
+ ],
+ 'variables': {
+ 'src_paths': [
+ '../base/test/android/junit/',
+ ],
+ },
+ 'includes': [ '../build/host_jar.gypi' ]
+ },
+ {
# GN: //base:base_junit_tests
'target_name': 'base_junit_tests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
+ 'base_junit_test_support',
'../testing/android/junit/junit_test.gyp:junit_test_support',
],
'variables': {
@@ -1537,7 +1571,13 @@
'target_name': 'chromium_android_linker',
'type': 'shared_library',
'sources': [
+ 'android/linker/android_dlext.h',
'android/linker/legacy_linker_jni.cc',
+ 'android/linker/legacy_linker_jni.h',
+ 'android/linker/linker_jni.cc',
+ 'android/linker/linker_jni.h',
+ 'android/linker/modern_linker_jni.cc',
+ 'android/linker/modern_linker_jni.h',
],
# The crazy linker is never instrumented.
'cflags!': [
@@ -1577,22 +1617,31 @@
'includes': [ '../build/apk_test.gypi' ],
},
],
+ 'conditions': [
+ ['test_isolation_mode != "noop"',
+ {
+ 'targets': [
+ {
+ 'target_name': 'base_unittests_apk_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'base_unittests_apk',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'base_unittests_apk.isolate',
+ ],
+ },
+ ]
+ }
+ ],
+ ],
}],
['OS == "win"', {
'targets': [
{
- 'target_name': 'debug_message',
- 'type': 'executable',
- 'sources': [
- 'debug_message.cc',
- ],
- 'msvs_settings': {
- 'VCLinkerTool': {
- 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
- },
- },
- },
- {
# Target to manually rebuild pe_image_test.dll which is checked into
# base/test/data/pe_image.
'target_name': 'pe_image_test',
diff --git a/chromium/base/base.gypi b/chromium/base/base.gypi
index 7ffad5417a0..ffb96bb65ac 100644
--- a/chromium/base/base.gypi
+++ b/chromium/base/base.gypi
@@ -18,8 +18,6 @@
'../build/build_config.h',
'allocator/allocator_extension.cc',
'allocator/allocator_extension.h',
- 'allocator/type_profiler_control.cc',
- 'allocator/type_profiler_control.h',
'android/animation_frame_time_histogram.cc',
'android/animation_frame_time_histogram.h',
'android/apk_assets.cc',
@@ -177,6 +175,8 @@
'deferred_sequenced_task_runner.h',
'environment.cc',
'environment.h',
+ 'feature_list.cc',
+ 'feature_list.h',
'file_descriptor_posix.h',
'file_version_info.h',
'file_version_info_mac.h',
@@ -317,8 +317,6 @@
'mac/scoped_mach_vm.h',
'mac/scoped_nsautorelease_pool.h',
'mac/scoped_nsautorelease_pool.mm',
- 'mac/scoped_nsexception_enabler.h',
- 'mac/scoped_nsexception_enabler.mm',
'mac/scoped_nsobject.h',
'mac/scoped_objc_class_swizzler.h',
'mac/scoped_objc_class_swizzler.mm',
@@ -363,6 +361,7 @@
'memory/shared_memory_android.cc',
'memory/shared_memory_handle.h',
'memory/shared_memory_handle_mac.cc',
+ 'memory/shared_memory_handle_win.cc',
'memory/shared_memory_mac.cc',
'memory/shared_memory_nacl.cc',
'memory/shared_memory_posix.cc',
@@ -476,6 +475,7 @@
'process/memory_linux.cc',
'process/memory_mac.mm',
'process/memory_win.cc',
+ 'process/port_provider_mac.h',
'process/process.h',
'process/process_handle_freebsd.cc',
'process/process_handle_linux.cc',
@@ -495,7 +495,6 @@
'process/process_iterator_openbsd.cc',
'process/process_iterator_win.cc',
'process/process_linux.cc',
- 'process/process_mac.cc',
'process/process_metrics.cc',
'process/process_metrics.h',
'process/process_metrics_freebsd.cc',
@@ -511,14 +510,14 @@
'profiler/alternate_timer.h',
'profiler/native_stack_sampler.cc',
'profiler/native_stack_sampler.h',
+ 'profiler/native_stack_sampler_posix.cc',
+ 'profiler/native_stack_sampler_win.cc',
'profiler/scoped_profile.cc',
'profiler/scoped_profile.h',
'profiler/scoped_tracker.cc',
'profiler/scoped_tracker.h',
'profiler/stack_sampling_profiler.cc',
'profiler/stack_sampling_profiler.h',
- 'profiler/stack_sampling_profiler_posix.cc',
- 'profiler/stack_sampling_profiler_win.cc',
'profiler/tracked_time.cc',
'profiler/tracked_time.h',
'rand_util.cc',
@@ -764,15 +763,18 @@
'include_dirs': [
'..',
],
- 'msvs_disabled_warnings': [
- 4018,
- ],
'target_conditions': [
['OS == "mac" or OS == "ios"', {
'sources!': [
'memory/shared_memory_posix.cc',
],
}],
+ ['OS == "ios"', {
+ 'sources!': [
+ 'memory/discardable_shared_memory.cc',
+ 'memory/discardable_shared_memory.h',
+ ],
+ }],
['(<(desktop_linux) == 0 and <(chromeos) == 0) or >(nacl_untrusted_build)==1', {
'sources/': [
['exclude', '^nix/'],
@@ -792,8 +794,6 @@
],
['>(nacl_untrusted_build)==1', {
'sources!': [
- 'allocator/type_profiler_control.cc',
- 'allocator/type_profiler_control.h',
'base_paths.cc',
'cpu.cc',
'debug/stack_trace.cc',
@@ -888,6 +888,7 @@
['include', '^mac/scoped_nsautorelease_pool\\.'],
['include', '^mac/scoped_nsobject\\.'],
['include', '^mac/scoped_objc_class_swizzler\\.'],
+ ['include', '^memory/shared_memory_posix\\.'],
['include', '^message_loop/message_pump_mac\\.'],
['include', '^strings/sys_string_conversions_mac\\.'],
['include', '^threading/platform_thread_mac\\.'],
@@ -900,10 +901,9 @@
['include', '^process/memory_stubs\.cc$'],
['include', '^process/process_handle_posix\.cc$'],
['include', '^process/process_metrics\\.cc$'],
+ # Exclude unsupported features on iOS.
+ ['exclude', '^files/file_path_watcher.*'],
['exclude', '^threading/platform_thread_internal_posix\\.(h|cc)'],
- ['exclude', 'files/file_path_watcher_fsevents.cc'],
- ['exclude', 'files/file_path_watcher_fsevents.h'],
- ['include', 'files/file_path_watcher_mac.cc'],
],
'sources': [
'process/memory_stubs.cc',
@@ -938,6 +938,10 @@
'include_dirs': [
'<(DEPTH)/third_party/wtl/include',
],
+ 'sources': [
+ 'profiler/win32_stack_frame_unwinder.cc',
+ 'profiler/win32_stack_frame_unwinder.h',
+ ],
'sources!': [
'files/file_path_watcher_fsevents.cc',
'files/file_path_watcher_fsevents.h',
@@ -1023,6 +1027,8 @@
'i18n/icu_string_conversions.h',
'i18n/icu_util.cc',
'i18n/icu_util.h',
+ 'i18n/message_formatter.cc',
+ 'i18n/message_formatter.h',
'i18n/number_formatting.cc',
'i18n/number_formatting.h',
'i18n/rtl.cc',
diff --git a/chromium/base/base.isolate b/chromium/base/base.isolate
index c7ba651ac71..e2d8beaca8a 100644
--- a/chromium/base/base.isolate
+++ b/chromium/base/base.isolate
@@ -9,6 +9,8 @@
'../third_party/icu/icu.isolate',
# Sanitizer-instrumented third-party libraries (if enabled).
'../third_party/instrumented_libraries/instrumented_libraries.isolate',
+ # MSVS runtime libraries.
+ '../build/config/win/msvs_dependencies.isolate',
],
'conditions': [
['use_custom_libcxx==1', {
@@ -36,7 +38,9 @@
['OS=="win" and asan==1 and component=="shared_library"', {
'variables': {
'files': [
- '../third_party/llvm-build/Release+Asserts/lib/clang/3.7.0/lib/windows/clang_rt.asan_dynamic-i386.dll',
+ # We only need x.y.z/lib/windows/clang_rt.asan_dynamic-i386.dll,
+ # but since the version (x.y.z) changes, just grab the whole dir.
+ '../third_party/llvm-build/Release+Asserts/lib/clang/',
],
},
}],
@@ -56,40 +60,6 @@
],
},
}],
- # Copy the VS runtime DLLs into the isolate so that they
- # don't have to be preinstalled on the target machine.
- ['OS=="win" and component=="shared_library" and CONFIGURATION_NAME=="Debug"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/x64/msvcp120d.dll',
- '<(PRODUCT_DIR)/x64/msvcr120d.dll',
- ],
- },
- }],
- ['OS=="win" and component=="shared_library" and CONFIGURATION_NAME=="Release"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/x64/msvcp120.dll',
- '<(PRODUCT_DIR)/x64/msvcr120.dll',
- ],
- },
- }],
- ['OS=="win" and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/msvcp120d.dll',
- '<(PRODUCT_DIR)/msvcr120d.dll',
- ],
- },
- }],
- ['OS=="win" and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/msvcp120.dll',
- '<(PRODUCT_DIR)/msvcr120.dll',
- ],
- },
- }],
# Workaround for https://code.google.com/p/swarming/issues/detail?id=211
['asan==0 or lsan==0 or msan==0 or tsan==0', {
'variables': {},
diff --git a/chromium/base/base_nacl.gyp b/chromium/base/base_nacl.gyp
index 47cf840550f..b2b4b7f76ea 100644
--- a/chromium/base/base_nacl.gyp
+++ b/chromium/base/base_nacl.gyp
@@ -35,7 +35,7 @@
'sync_socket_nacl.cc',
'time/time_posix.cc',
],
- 'gcc_compile_flags': [
+ 'compile_flags': [
'-fno-strict-aliasing',
],
},
diff --git a/chromium/base/base_paths_win.h b/chromium/base/base_paths_win.h
index 9ac9e45e0a7..454d67b7703 100644
--- a/chromium/base/base_paths_win.h
+++ b/chromium/base/base_paths_win.h
@@ -42,8 +42,8 @@ enum {
DIR_COMMON_DESKTOP, // Directory for the common desktop (visible
// on all user's Desktop).
DIR_USER_QUICK_LAUNCH, // Directory for the quick launch shortcuts.
- DIR_TASKBAR_PINS, // Directory for the shortcuts pinned to taskbar via
- // base::win::TaskbarPinShortcutLink().
+ DIR_TASKBAR_PINS, // Directory for the shortcuts pinned to taskbar
+ // (Win7+) via base::win::PinShortcutToTaskbar().
DIR_WINDOWS_FONTS, // Usually C:\Windows\Fonts.
PATH_WIN_END
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index 7f3be7f516c..76827b850a1 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -23,6 +23,16 @@ const char kEnableLowEndDeviceMode[] = "enable-low-end-device-mode";
// Force disabling of low-end device mode when set.
const char kDisableLowEndDeviceMode[] = "disable-low-end-device-mode";
+// This option can be used to force field trials when testing changes locally.
+// The argument is a list of name and value pairs, separated by slashes. If a
+// trial name is prefixed with an asterisk, that trial will start activated.
+// For example, the following argument defines two trials, with the second one
+// activated: "GoogleNow/Enable/*MaterialDesignNTP/Default/" This option can
+// also be used by the browser process to send the list of trials to a
+// non-browser process, using the same format. See
+// FieldTrialList::CreateTrialsFromString() in field_trial.h for details.
+const char kForceFieldTrials[] = "force-fieldtrials";
+
// Suppresses all error dialogs when present.
const char kNoErrorDialogs[] = "noerrdialogs";
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index bbd590bad05..95f6bffb23d 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -13,6 +13,7 @@ namespace switches {
extern const char kDisableBreakpad[];
extern const char kEnableCrashReporter[];
+extern const char kForceFieldTrials[];
extern const char kFullMemoryCrashReport[];
extern const char kEnableLowEndDeviceMode[];
extern const char kDisableLowEndDeviceMode[];
diff --git a/chromium/base/base_unittests.isolate b/chromium/base/base_unittests.isolate
index 57fc4d2e531..208501fce86 100644
--- a/chromium/base/base_unittests.isolate
+++ b/chromium/base/base_unittests.isolate
@@ -25,9 +25,7 @@
'variables': {
'files': [
'../testing/test_env.py',
- '<(PRODUCT_DIR)/base_unittests<(EXECUTABLE_SUFFIX)',
],
- 'read_only': 1,
},
}],
['OS=="linux"', {
diff --git a/chromium/base/base_unittests_apk.isolate b/chromium/base/base_unittests_apk.isolate
new file mode 100644
index 00000000000..425c779833c
--- /dev/null
+++ b/chromium/base/base_unittests_apk.isolate
@@ -0,0 +1,24 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ '../build/android/android.isolate',
+ 'base_unittests.isolate',
+ ],
+ 'variables': {
+ 'command': [
+ '<(PRODUCT_DIR)/bin/run_base_unittests',
+ ],
+ 'files': [
+ '../build/config/',
+ '../third_party/icu/icu.isolate',
+ '../third_party/instrumented_libraries/instrumented_libraries.isolate',
+ '<(PRODUCT_DIR)/base_unittests_apk/',
+ '<(PRODUCT_DIR)/bin/run_base_unittests',
+ '<(PRODUCT_DIR)/icudtl.dat',
+ 'base.isolate',
+ 'base_unittests.isolate',
+ ]
+ },
+}
diff --git a/chromium/base/basictypes.h b/chromium/base/basictypes.h
index d71abd9bac7..e167466ffe2 100644
--- a/chromium/base/basictypes.h
+++ b/chromium/base/basictypes.h
@@ -28,7 +28,8 @@ typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
-// DEPRECATED: Please use std::numeric_limits (from <limits>) instead.
+// DEPRECATED: Please use std::numeric_limits (from <limits>) or
+// (U)INT{8,16,32,64}_{MIN,MAX} in case of globals (and include <stdint.h>).
const uint8 kuint8max = 0xFF;
const uint16 kuint16max = 0xFFFF;
const uint32 kuint32max = 0xFFFFFFFF;
diff --git a/chromium/base/bits.h b/chromium/base/bits.h
index b2209e8ed79..505d2c8f75d 100644
--- a/chromium/base/bits.h
+++ b/chromium/base/bits.h
@@ -41,6 +41,12 @@ inline int Log2Ceiling(uint32 n) {
}
}
+// Round up |size| to a multiple of alignment, which must be a power of two.
+inline size_t Align(size_t size, size_t alignment) {
+ DCHECK_EQ(alignment & (alignment - 1), 0u);
+ return (size + alignment - 1) & ~(alignment - 1);
+}
+
} // namespace bits
} // namespace base
diff --git a/chromium/base/bits_unittest.cc b/chromium/base/bits_unittest.cc
index e913d6ae598..1dad0f45319 100644
--- a/chromium/base/bits_unittest.cc
+++ b/chromium/base/bits_unittest.cc
@@ -5,6 +5,9 @@
// This file contains the unit tests for the bit utilities.
#include "base/bits.h"
+
+#include <limits>
+
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -44,5 +47,17 @@ TEST(BitsTest, Log2Ceiling) {
EXPECT_EQ(32, Log2Ceiling(0xffffffffU));
}
+TEST(BitsTest, Align) {
+ const size_t kSizeTMax = std::numeric_limits<size_t>::max();
+ EXPECT_EQ(0ul, Align(0, 4));
+ EXPECT_EQ(4ul, Align(1, 4));
+ EXPECT_EQ(4096ul, Align(1, 4096));
+ EXPECT_EQ(4096ul, Align(4096, 4096));
+ EXPECT_EQ(4096ul, Align(4095, 4096));
+ EXPECT_EQ(8192ul, Align(4097, 4096));
+ EXPECT_EQ(kSizeTMax - 31, Align(kSizeTMax - 62, 32));
+ EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
+}
+
} // namespace bits
} // namespace base
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index fefd7a2b201..b6353dc8e56 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -17,9 +17,6 @@
#include "base/memory/scoped_ptr.h"
#include "base/template_util.h"
-template <typename T>
-class ScopedVector;
-
namespace base {
namespace internal {
class CallbackBase;
diff --git a/chromium/base/command_line.cc b/chromium/base/command_line.cc
index 3fcf22ac52f..c2ce33db5d2 100644
--- a/chromium/base/command_line.cc
+++ b/chromium/base/command_line.cc
@@ -267,7 +267,7 @@ void CommandLine::SetProgram(const FilePath& program) {
}
bool CommandLine::HasSwitch(const base::StringPiece& switch_string) const {
- DCHECK_EQ(StringToLowerASCII(switch_string.as_string()), switch_string);
+ DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
return switches_by_stringpiece_.find(switch_string) !=
switches_by_stringpiece_.end();
}
@@ -297,7 +297,7 @@ FilePath CommandLine::GetSwitchValuePath(
CommandLine::StringType CommandLine::GetSwitchValueNative(
const base::StringPiece& switch_string) const {
- DCHECK_EQ(StringToLowerASCII(switch_string.as_string()), switch_string);
+ DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
auto result = switches_by_stringpiece_.find(switch_string);
return result == switches_by_stringpiece_.end() ? StringType()
: *(result->second);
@@ -315,7 +315,7 @@ void CommandLine::AppendSwitchPath(const std::string& switch_string,
void CommandLine::AppendSwitchNative(const std::string& switch_string,
const CommandLine::StringType& value) {
#if defined(OS_WIN)
- const std::string switch_key = StringToLowerASCII(switch_string);
+ const std::string switch_key = ToLowerASCII(switch_string);
StringType combined_switch_string(ASCIIToUTF16(switch_key));
#elif defined(OS_POSIX)
const std::string& switch_key = switch_string;
@@ -394,8 +394,9 @@ void CommandLine::PrependWrapper(const CommandLine::StringType& wrapper) {
return;
// The wrapper may have embedded arguments (like "gdb --args"). In this case,
// we don't pretend to do anything fancy, we just split on spaces.
- StringVector wrapper_argv;
- SplitString(wrapper, FILE_PATH_LITERAL(' '), &wrapper_argv);
+ StringVector wrapper_argv = SplitString(
+ wrapper, FilePath::StringType(1, ' '), base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
// Prepend the wrapper and update the switches/arguments |begin_args_|.
argv_.insert(argv_.begin(), wrapper_argv.begin(), wrapper_argv.end());
begin_args_ += wrapper_argv.size();
diff --git a/chromium/base/command_line_unittest.cc b/chromium/base/command_line_unittest.cc
index 018d83f1add..ac8a39567c3 100644
--- a/chromium/base/command_line_unittest.cc
+++ b/chromium/base/command_line_unittest.cc
@@ -378,6 +378,9 @@ TEST(CommandLineTest, ProgramQuotes) {
// Calling Init multiple times should not modify the previous CommandLine.
TEST(CommandLineTest, Init) {
+ // Call Init without checking output once so we know it's been called
+ // whether or not the test runner does so.
+ CommandLine::Init(0, NULL);
CommandLine* initial = CommandLine::ForCurrentProcess();
EXPECT_FALSE(CommandLine::Init(0, NULL));
CommandLine* current = CommandLine::ForCurrentProcess();
diff --git a/chromium/base/compiler_specific.h b/chromium/base/compiler_specific.h
index 66dc80db319..02603df5845 100644
--- a/chromium/base/compiler_specific.h
+++ b/chromium/base/compiler_specific.h
@@ -68,28 +68,6 @@
#endif // COMPILER_MSVC
-// The C++ standard requires that static const members have an out-of-class
-// definition (in a single compilation unit), but MSVC chokes on this (when
-// language extensions, which are required, are enabled). (You're only likely to
-// notice the need for a definition if you take the address of the member or,
-// more commonly, pass it to a function that takes it as a reference argument --
-// probably an STL function.) This macro makes MSVC do the right thing. See
-// http://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx for more
-// information. Use like:
-//
-// In .h file:
-// struct Foo {
-// static const int kBar = 5;
-// };
-//
-// In .cc file:
-// STATIC_CONST_MEMBER_DEFINITION const int Foo::kBar;
-#if defined(COMPILER_MSVC)
-#define STATIC_CONST_MEMBER_DEFINITION __declspec(selectany)
-#else
-#define STATIC_CONST_MEMBER_DEFINITION
-#endif
-
// Annotate a variable indicating it's ok if the variable is not used.
// (Typically used to silence a compiler warning when the assignment
// is important for some other reason.)
@@ -101,7 +79,7 @@
// Annotate a typedef or function indicating it's ok if it's not used.
// Use like:
// typedef Foo Bar ALLOW_UNUSED_TYPE;
-#if defined(COMPILER_GCC)
+#if defined(COMPILER_GCC) || defined(__clang__)
#define ALLOW_UNUSED_TYPE __attribute__((unused))
#else
#define ALLOW_UNUSED_TYPE
diff --git a/chromium/base/containers/scoped_ptr_map.h b/chromium/base/containers/scoped_ptr_map.h
index a4605e3b688..622a39d121e 100644
--- a/chromium/base/containers/scoped_ptr_map.h
+++ b/chromium/base/containers/scoped_ptr_map.h
@@ -42,9 +42,9 @@ class ScopedPtrMap {
ScopedPtrMap() {}
~ScopedPtrMap() { clear(); }
- ScopedPtrMap(ScopedPtrMap<Key, ScopedPtr>&& other) { swap(other); }
+ ScopedPtrMap(ScopedPtrMap&& other) { swap(other); }
- ScopedPtrMap& operator=(ScopedPtrMap<Key, ScopedPtr>&& rhs) {
+ ScopedPtrMap& operator=(ScopedPtrMap&& rhs) {
swap(rhs);
return *this;
}
@@ -61,7 +61,7 @@ class ScopedPtrMap {
const_iterator begin() const { return data_.begin(); }
const_iterator end() const { return data_.end(); }
- void swap(ScopedPtrMap<Key, ScopedPtr>& other) { data_.swap(other.data_); }
+ void swap(ScopedPtrMap& other) { data_.swap(other.data_); }
void clear() { STLDeleteValues(&data_); }
diff --git a/chromium/base/containers/scoped_ptr_map_unittest.cc b/chromium/base/containers/scoped_ptr_map_unittest.cc
index 46843b3f252..706b2edfb18 100644
--- a/chromium/base/containers/scoped_ptr_map_unittest.cc
+++ b/chromium/base/containers/scoped_ptr_map_unittest.cc
@@ -147,15 +147,30 @@ TEST(ScopedPtrMapTest, Clear) {
TEST(ScopedPtrMapTest, Compare) {
// Construct a ScopedPtrMap with a custom comparison function.
- bool destroyed = false;
- ScopedPtrMap<int, scoped_ptr<ScopedDestroyer>, std::greater<int>> scoped_map;
- scoped_map.insert(0, make_scoped_ptr(new ScopedDestroyer(&destroyed)));
- scoped_map.insert(1, make_scoped_ptr(new ScopedDestroyer(&destroyed)));
+ ScopedPtrMap<int, scoped_ptr<int>, std::greater<int>> scoped_map1;
+ scoped_map1.insert(0, make_scoped_ptr(new int(0)));
+ scoped_map1.insert(1, make_scoped_ptr(new int(0)));
- auto it = scoped_map.begin();
+ auto it = scoped_map1.begin();
EXPECT_EQ(1, it->first);
++it;
EXPECT_EQ(0, it->first);
+
+ // Test the move constructor.
+ ScopedPtrMap<int, scoped_ptr<int>, std::greater<int>> scoped_map2(
+ scoped_map1.Pass());
+ EXPECT_EQ(2u, scoped_map2.size());
+ EXPECT_TRUE(scoped_map1.empty());
+
+ // Test move assignment.
+ scoped_map1 = scoped_map2.Pass();
+ EXPECT_EQ(2u, scoped_map1.size());
+ EXPECT_TRUE(scoped_map2.empty());
+
+ // Test swap.
+ scoped_map2.swap(scoped_map1);
+ EXPECT_EQ(2u, scoped_map2.size());
+ EXPECT_TRUE(scoped_map1.empty());
}
TEST(ScopedPtrMapTest, Scope) {
diff --git a/chromium/base/cpu.cc b/chromium/base/cpu.cc
index ef3309dad18..edba2c21988 100644
--- a/chromium/base/cpu.cc
+++ b/chromium/base/cpu.cc
@@ -44,6 +44,7 @@ CPU::CPU()
has_sse42_(false),
has_avx_(false),
has_avx_hardware_(false),
+ has_avx2_(false),
has_aesni_(false),
has_non_stop_time_stamp_counter_(false),
has_broken_neon_(false),
@@ -72,7 +73,7 @@ void __cpuid(int cpu_info[4], int info_type) {
void __cpuid(int cpu_info[4], int info_type) {
__asm__ volatile (
- "cpuid \n\t"
+ "cpuid\n"
: "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
@@ -85,7 +86,8 @@ void __cpuid(int cpu_info[4], int info_type) {
uint64 _xgetbv(uint32 xcr) {
uint32 eax, edx;
- __asm__ volatile ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (xcr));
+ __asm__ volatile (
+ "xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
return (static_cast<uint64>(edx) << 32) | eax;
}
@@ -110,7 +112,7 @@ class LazyCpuInfoValue {
revision = 0;
const struct {
const char key[17];
- unsigned *result;
+ unsigned int* result;
} kUnsignedValues[] = {
{"CPU implementer", &implementer},
{"CPU architecture", &architecture},
@@ -156,7 +158,7 @@ class LazyCpuInfoValue {
// The string may have leading "0x" or not, so we use strtoul to
// handle that.
- char *endptr;
+ char* endptr;
std::string value(value_sp.as_string());
unsigned long int result = strtoul(value.c_str(), &endptr, 0);
if (*endptr == 0 && result <= UINT_MAX) {
@@ -211,7 +213,11 @@ void CPU::Initialize() {
// Interpret CPU feature information.
if (num_ids > 0) {
+ int cpu_info7[4] = {0};
__cpuid(cpu_info, 1);
+ if (num_ids >= 7) {
+ __cpuid(cpu_info7, 7);
+ }
signature_ = cpu_info[0];
stepping_ = cpu_info[0] & 0xf;
model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
@@ -244,6 +250,7 @@ void CPU::Initialize() {
(cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
(_xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
+ has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
}
// Get the brand string of the cpu.
@@ -275,6 +282,7 @@ void CPU::Initialize() {
}
CPU::IntelMicroArchitecture CPU::GetIntelMicroArchitecture() const {
+ if (has_avx2()) return AVX2;
if (has_avx()) return AVX;
if (has_sse42()) return SSE42;
if (has_sse41()) return SSE41;
diff --git a/chromium/base/cpu.h b/chromium/base/cpu.h
index 0c809f00c8b..b3960782706 100644
--- a/chromium/base/cpu.h
+++ b/chromium/base/cpu.h
@@ -26,6 +26,7 @@ class BASE_EXPORT CPU {
SSE41,
SSE42,
AVX,
+ AVX2,
MAX_INTEL_MICRO_ARCHITECTURE
};
@@ -46,6 +47,7 @@ class BASE_EXPORT CPU {
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
bool has_avx() const { return has_avx_; }
+ bool has_avx2() const { return has_avx2_; }
// has_avx_hardware returns true when AVX is present in the CPU. This might
// differ from the value of |has_avx()| because |has_avx()| also tests for
// operating system support needed to actually call AVX instuctions.
@@ -84,6 +86,7 @@ class BASE_EXPORT CPU {
bool has_sse42_;
bool has_avx_;
bool has_avx_hardware_;
+ bool has_avx2_;
bool has_aesni_;
bool has_non_stop_time_stamp_counter_;
bool has_broken_neon_;
diff --git a/chromium/base/cpu_unittest.cc b/chromium/base/cpu_unittest.cc
index 18bf959a55e..931509738fe 100644
--- a/chromium/base/cpu_unittest.cc
+++ b/chromium/base/cpu_unittest.cc
@@ -7,6 +7,11 @@
#include "testing/gtest/include/gtest/gtest.h"
+#if _MSC_VER >= 1700
+// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
+#pragma warning(disable: 4752)
+#endif
+
// Tests whether we can run extended instructions represented by the CPU
// information. This test actually executes some extended instructions (such as
// MMX, SSE, etc.) supported by the CPU and sees we can run them without
@@ -17,77 +22,95 @@ TEST(CPU, RunExtendedInstructions) {
// Retrieve the CPU information.
base::CPU cpu;
-// TODO(jschuh): crbug.com/168866 Find a way to enable this on Win64.
-#if defined(OS_WIN) && !defined(_M_X64)
ASSERT_TRUE(cpu.has_mmx());
+ ASSERT_TRUE(cpu.has_sse());
+ ASSERT_TRUE(cpu.has_sse2());
+// TODO(fbarchard): consider enabling for clangcl.
+#if defined(COMPILER_GCC)
// Execute an MMX instruction.
- __asm emms;
+ __asm__ __volatile__("emms\n" : : : "mm0");
- if (cpu.has_sse()) {
- // Execute an SSE instruction.
- __asm xorps xmm0, xmm0;
- }
+ // Execute an SSE instruction.
+ __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");
- if (cpu.has_sse2()) {
- // Execute an SSE 2 instruction.
- __asm psrldq xmm0, 0;
- }
+ // Execute an SSE 2 instruction.
+ __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");
if (cpu.has_sse3()) {
// Execute an SSE 3 instruction.
- __asm addsubpd xmm0, xmm0;
+ __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");
}
if (cpu.has_ssse3()) {
// Execute a Supplimental SSE 3 instruction.
- __asm psignb xmm0, xmm0;
+ __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
}
if (cpu.has_sse41()) {
// Execute an SSE 4.1 instruction.
- __asm pmuldq xmm0, xmm0;
+ __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
}
if (cpu.has_sse42()) {
// Execute an SSE 4.2 instruction.
- __asm crc32 eax, eax;
+ __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
}
-#elif defined(OS_POSIX) && defined(__x86_64__)
- ASSERT_TRUE(cpu.has_mmx());
- // Execute an MMX instruction.
- __asm__ __volatile__("emms\n" : : : "mm0");
-
- if (cpu.has_sse()) {
- // Execute an SSE instruction.
- __asm__ __volatile__("xorps %%xmm0, %%xmm0\n" : : : "xmm0");
+ if (cpu.has_avx()) {
+ // Execute an AVX instruction.
+ __asm__ __volatile__("vzeroupper\n" : : : "xmm0");
}
- if (cpu.has_sse2()) {
- // Execute an SSE 2 instruction.
- __asm__ __volatile__("psrldq $0, %%xmm0\n" : : : "xmm0");
+ if (cpu.has_avx2()) {
+ // Execute an AVX 2 instruction.
+ __asm__ __volatile__("vpunpcklbw %%ymm0, %%ymm0, %%ymm0\n" : : : "xmm0");
}
+// TODO(jschuh): crbug.com/168866 Find a way to enable this on Win64.
+#elif defined(COMPILER_MSVC) && defined(ARCH_CPU_32_BITS)
+
+ // Execute an MMX instruction.
+ __asm emms;
+
+ // Execute an SSE instruction.
+ __asm xorps xmm0, xmm0;
+
+ // Execute an SSE 2 instruction.
+ __asm psrldq xmm0, 0;
+
if (cpu.has_sse3()) {
// Execute an SSE 3 instruction.
- __asm__ __volatile__("addsubpd %%xmm0, %%xmm0\n" : : : "xmm0");
+ __asm addsubpd xmm0, xmm0;
}
if (cpu.has_ssse3()) {
// Execute a Supplimental SSE 3 instruction.
- __asm__ __volatile__("psignb %%xmm0, %%xmm0\n" : : : "xmm0");
+ __asm psignb xmm0, xmm0;
}
if (cpu.has_sse41()) {
// Execute an SSE 4.1 instruction.
- __asm__ __volatile__("pmuldq %%xmm0, %%xmm0\n" : : : "xmm0");
+ __asm pmuldq xmm0, xmm0;
}
if (cpu.has_sse42()) {
// Execute an SSE 4.2 instruction.
- __asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
+ __asm crc32 eax, eax;
}
-#endif
-#endif
+
+// Visual C 2012 required for AVX.
+#if _MSC_VER >= 1700
+ if (cpu.has_avx()) {
+ // Execute an AVX instruction.
+ __asm vzeroupper;
+ }
+
+ if (cpu.has_avx2()) {
+ // Execute an AVX 2 instruction.
+ __asm vpunpcklbw ymm0, ymm0, ymm0
+ }
+#endif // _MSC_VER >= 1700
+#endif // defined(COMPILER_GCC)
+#endif // defined(ARCH_CPU_X86_FAMILY)
}
diff --git a/chromium/base/debug/crash_logging.cc b/chromium/base/debug/crash_logging.cc
index f9b44490d4b..058b476800d 100644
--- a/chromium/base/debug/crash_logging.cc
+++ b/chromium/base/debug/crash_logging.cc
@@ -119,7 +119,7 @@ void SetCrashKeyFromAddresses(const base::StringPiece& key,
hex_backtrace.push_back(s);
}
- value = JoinString(hex_backtrace, ' ');
+ value = base::JoinString(hex_backtrace, " ");
// Warn if this exceeds the breakpad limits.
DCHECK_LE(value.length(), kBreakpadValueMax);
diff --git a/chromium/base/debug/debugger_posix.cc b/chromium/base/debug/debugger_posix.cc
index 8599571e345..a2e804f2e63 100644
--- a/chromium/base/debug/debugger_posix.cc
+++ b/chromium/base/debug/debugger_posix.cc
@@ -35,6 +35,7 @@
#include <ostream>
#include "base/basictypes.h"
+#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h"
@@ -237,6 +238,12 @@ void BreakDebugger() {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
+ // Linker's ICF feature may merge this function with other functions with the
+ // same definition (e.g. any function whose sole job is to call abort()) and
+ // it may confuse the crash report processing system. http://crbug.com/508489
+ static int static_variable_to_make_this_function_unique = 0;
+ base::debug::Alias(&static_variable_to_make_this_function_unique);
+
DEBUG_BREAK();
#if defined(OS_ANDROID) && !defined(OFFICIAL_BUILD)
// For Android development we always build release (debug builds are
diff --git a/chromium/base/debug/gdi_debug_util_win.cc b/chromium/base/debug/gdi_debug_util_win.cc
index 3cd71f1191d..2db10d17822 100644
--- a/chromium/base/debug/gdi_debug_util_win.cc
+++ b/chromium/base/debug/gdi_debug_util_win.cc
@@ -75,11 +75,9 @@ void GDIBitmapAllocFailure(BITMAPINFOHEADER* header, HANDLE shared_section) {
base::debug::Alias(&heigth);
base::debug::Alias(&shared_section);
- int num_user_handles = GetGuiResources(GetCurrentProcess(),
- GR_USEROBJECTS);
+ DWORD num_user_handles = GetGuiResources(GetCurrentProcess(), GR_USEROBJECTS);
- int num_gdi_handles = GetGuiResources(GetCurrentProcess(),
- GR_GDIOBJECTS);
+ DWORD num_gdi_handles = GetGuiResources(GetCurrentProcess(), GR_GDIOBJECTS);
if (num_gdi_handles == 0) {
DWORD get_gui_resources_error = GetLastError();
base::debug::Alias(&get_gui_resources_error);
diff --git a/chromium/base/debug/proc_maps_linux.cc b/chromium/base/debug/proc_maps_linux.cc
index 4c1aedf0032..8c8965b8f15 100644
--- a/chromium/base/debug/proc_maps_linux.cc
+++ b/chromium/base/debug/proc_maps_linux.cc
@@ -96,8 +96,8 @@ bool ParseProcMaps(const std::string& input,
// This isn't async safe nor terribly efficient, but it doesn't need to be at
// this point in time.
- std::vector<std::string> lines;
- SplitString(input, '\n', &lines);
+ std::vector<std::string> lines = SplitString(
+ input, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
for (size_t i = 0; i < lines.size(); ++i) {
// Due to splitting on '\n' the last line should be empty.
diff --git a/chromium/base/debug/stack_trace.h b/chromium/base/debug/stack_trace.h
index fb271b6da4a..85c91777b49 100644
--- a/chromium/base/debug/stack_trace.h
+++ b/chromium/base/debug/stack_trace.h
@@ -26,16 +26,13 @@ namespace debug {
// Enables stack dump to console output on exception and signals.
// When enabled, the process will quit immediately. This is meant to be used in
// unit_tests only! This is not thread-safe: only call from main thread.
-BASE_EXPORT bool EnableInProcessStackDumping();
-
-// A different version of EnableInProcessStackDumping that also works for
-// sandboxed processes. For more details take a look at the description
-// of EnableInProcessStackDumping.
+// In sandboxed processes, this has to be called before the sandbox is turned
+// on.
// Calling this function on Linux opens /proc/self/maps and caches its
-// contents. In DEBUG builds, this function also opens the object files that
-// are loaded in memory and caches their file descriptors (this cannot be
+// contents. In non-official builds, this function also opens the object files
+// that are loaded in memory and caches their file descriptors (this cannot be
// done in official builds because it has security implications).
-BASE_EXPORT bool EnableInProcessStackDumpingForSandbox();
+BASE_EXPORT bool EnableInProcessStackDumping();
// A stacktrace can be helpful in debugging. For example, you can include a
// stacktrace member in a object (probably around #ifndef NDEBUG) so that you
@@ -54,8 +51,8 @@ class BASE_EXPORT StackTrace {
// Creates a stacktrace for an exception.
// Note: this function will throw an import not found (StackWalk64) exception
// on system without dbghelp 5.1.
- StackTrace(const _EXCEPTION_POINTERS* exception_pointers);
- StackTrace(const _CONTEXT* context);
+ StackTrace(_EXCEPTION_POINTERS* exception_pointers);
+ StackTrace(_CONTEXT* context);
#endif
// Copying and assignment are allowed with the default functions.
diff --git a/chromium/base/debug/stack_trace_posix.cc b/chromium/base/debug/stack_trace_posix.cc
index 9593962d3aa..d8eb00598d3 100644
--- a/chromium/base/debug/stack_trace_posix.cc
+++ b/chromium/base/debug/stack_trace_posix.cc
@@ -281,6 +281,16 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
}
PrintToStderr("\n");
+#if defined(CFI_ENFORCEMENT)
+ if (signal == SIGILL && info->si_code == ILL_ILLOPN) {
+ PrintToStderr(
+ "CFI: Most likely a control flow integrity violation; for more "
+ "information see:\n");
+ PrintToStderr(
+ "https://www.chromium.org/developers/testing/control-flow-integrity\n");
+ }
+#endif
+
debug::StackTrace().Print();
#if defined(OS_LINUX)
@@ -395,6 +405,9 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1));
#endif // ARCH_CPU_32_BITS
#endif // defined(OS_MACOSX)
+
+ PrintToStderr("[end of stack trace]\n");
+
_exit(1);
}
@@ -499,7 +512,7 @@ class SandboxSymbolizeHelper {
int GetFileDescriptor(const char* file_path) {
int fd = -1;
-#if !defined(NDEBUG)
+#if !defined(OFFICIAL_BUILD)
if (file_path) {
// The assumption here is that iterating over std::map<std::string, int>
// using a const_iterator does not allocate dynamic memory, hense it is
@@ -520,7 +533,7 @@ class SandboxSymbolizeHelper {
fd = -1;
}
}
-#endif // !defined(NDEBUG)
+#endif // !defined(OFFICIAL_BUILD)
return fd;
}
@@ -606,11 +619,9 @@ class SandboxSymbolizeHelper {
// Opens all object files and caches their file descriptors.
void OpenSymbolFiles() {
// Pre-opening and caching the file descriptors of all loaded modules is
- // not considered safe for retail builds. Hence it is only done in debug
- // builds. For more details, take a look at: http://crbug.com/341966
- // Enabling this to release mode would require approval from the security
- // team.
-#if !defined(NDEBUG)
+ // not safe for production builds. Hence it is only done in non-official
+ // builds. For more details, take a look at: http://crbug.com/341966.
+#if !defined(OFFICIAL_BUILD)
// Open the object files for all read-only executable regions and cache
// their file descriptors.
std::vector<MappedMemoryRegion>::const_iterator it;
@@ -642,7 +653,7 @@ class SandboxSymbolizeHelper {
}
}
}
-#endif // !defined(NDEBUG)
+#endif // !defined(OFFICIAL_BUILD)
}
// Initializes and installs the symbolization callback.
@@ -664,7 +675,7 @@ class SandboxSymbolizeHelper {
// Closes all file descriptors owned by this instance.
void CloseObjectFiles() {
-#if !defined(NDEBUG)
+#if !defined(OFFICIAL_BUILD)
std::map<std::string, int>::iterator it;
for (it = modules_.begin(); it != modules_.end(); ++it) {
int ret = IGNORE_EINTR(close(it->second));
@@ -672,19 +683,18 @@ class SandboxSymbolizeHelper {
it->second = -1;
}
modules_.clear();
-#endif // !defined(NDEBUG)
+#endif // !defined(OFFICIAL_BUILD)
}
// Set to true upon successful initialization.
bool is_initialized_;
-#if !defined(NDEBUG)
+#if !defined(OFFICIAL_BUILD)
// Mapping from file name to file descriptor. Includes file descriptors
// for all successfully opened object files and the file descriptor for
- // /proc/self/maps. This code is not safe for release builds so
- // this is only done for DEBUG builds.
+ // /proc/self/maps. This code is not safe for production builds.
std::map<std::string, int> modules_;
-#endif // !defined(NDEBUG)
+#endif // !defined(OFFICIAL_BUILD)
// Cache for the process memory regions. Produced by parsing the contents
// of /proc/self/maps cache.
@@ -694,15 +704,11 @@ class SandboxSymbolizeHelper {
};
#endif // USE_SYMBOLIZE
-bool EnableInProcessStackDumpingForSandbox() {
+bool EnableInProcessStackDumping() {
#if defined(USE_SYMBOLIZE)
SandboxSymbolizeHelper::GetInstance();
#endif // USE_SYMBOLIZE
- return EnableInProcessStackDumping();
-}
-
-bool EnableInProcessStackDumping() {
// When running in an application, our code typically expects SIGPIPE
// to be ignored. Therefore, when testing that same code, it should run
// with SIGPIPE ignored as well.
diff --git a/chromium/base/debug/stack_trace_win.cc b/chromium/base/debug/stack_trace_win.cc
index 55d55624088..d5be5efb355 100644
--- a/chromium/base/debug/stack_trace_win.cc
+++ b/chromium/base/debug/stack_trace_win.cc
@@ -26,6 +26,9 @@ namespace {
// exception. Only used in unit tests.
LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = NULL;
+bool g_initialized_symbols = false;
+DWORD g_init_error = ERROR_SUCCESS;
+
// Prints the exception call stack.
// This is the unit tests exception filter.
long WINAPI StackDumpExceptionFilter(EXCEPTION_POINTERS* info) {
@@ -42,6 +45,55 @@ FilePath GetExePath() {
return FilePath(system_buffer);
}
+bool InitializeSymbols() {
+ if (g_initialized_symbols)
+ return g_init_error == ERROR_SUCCESS;
+ g_initialized_symbols = true;
+ // Defer symbol load until they're needed, use undecorated names, and get line
+ // numbers.
+ SymSetOptions(SYMOPT_DEFERRED_LOADS |
+ SYMOPT_UNDNAME |
+ SYMOPT_LOAD_LINES);
+ if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+ g_init_error = GetLastError();
+ // TODO(awong): Handle error: SymInitialize can fail with
+ // ERROR_INVALID_PARAMETER.
+ // When it fails, we should not call debugbreak since it kills the current
+ // process (prevents future tests from running or kills the browser
+ // process).
+ DLOG(ERROR) << "SymInitialize failed: " << g_init_error;
+ return false;
+ }
+
+ // When transferring the binaries e.g. between bots, path put
+ // into the executable will get off. To still retrieve symbols correctly,
+ // add the directory of the executable to symbol search path.
+ // All following errors are non-fatal.
+ const size_t kSymbolsArraySize = 1024;
+ scoped_ptr<wchar_t[]> symbols_path(new wchar_t[kSymbolsArraySize]);
+
+ // Note: The below function takes buffer size as number of characters,
+ // not number of bytes!
+ if (!SymGetSearchPathW(GetCurrentProcess(),
+ symbols_path.get(),
+ kSymbolsArraySize)) {
+ g_init_error = GetLastError();
+ DLOG(WARNING) << "SymGetSearchPath failed: " << g_init_error;
+ return false;
+ }
+
+ std::wstring new_path(std::wstring(symbols_path.get()) +
+ L";" + GetExePath().DirName().value());
+ if (!SymSetSearchPathW(GetCurrentProcess(), new_path.c_str())) {
+ g_init_error = GetLastError();
+ DLOG(WARNING) << "SymSetSearchPath failed." << g_init_error;
+ return false;
+ }
+
+ g_init_error = ERROR_SUCCESS;
+ return true;
+}
+
// SymbolContext is a threadsafe singleton that wraps the DbgHelp Sym* family
// of functions. The Sym* family of functions may only be invoked by one
// thread at a time. SymbolContext code may access a symbol server over the
@@ -66,11 +118,6 @@ class SymbolContext {
Singleton<SymbolContext, LeakySingletonTraits<SymbolContext> >::get();
}
- // Returns the error code of a failed initialization.
- DWORD init_error() const {
- return init_error_;
- }
-
// For the given trace, attempts to resolve the symbols, and output a trace
// to the ostream os. The format for each line of the backtrace is:
//
@@ -132,51 +179,10 @@ class SymbolContext {
private:
friend struct DefaultSingletonTraits<SymbolContext>;
- SymbolContext() : init_error_(ERROR_SUCCESS) {
- // Initializes the symbols for the process.
- // Defer symbol load until they're needed, use undecorated names, and
- // get line numbers.
- SymSetOptions(SYMOPT_DEFERRED_LOADS |
- SYMOPT_UNDNAME |
- SYMOPT_LOAD_LINES);
- if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
- init_error_ = GetLastError();
- // TODO(awong): Handle error: SymInitialize can fail with
- // ERROR_INVALID_PARAMETER.
- // When it fails, we should not call debugbreak since it kills the current
- // process (prevents future tests from running or kills the browser
- // process).
- DLOG(ERROR) << "SymInitialize failed: " << init_error_;
- return;
- }
-
- init_error_ = ERROR_SUCCESS;
-
- // When transferring the binaries e.g. between bots, path put
- // into the executable will get off. To still retrieve symbols correctly,
- // add the directory of the executable to symbol search path.
- // All following errors are non-fatal.
- const size_t kSymbolsArraySize = 1024;
- scoped_ptr<wchar_t[]> symbols_path(new wchar_t[kSymbolsArraySize]);
-
- // Note: The below function takes buffer size as number of characters,
- // not number of bytes!
- if (!SymGetSearchPathW(GetCurrentProcess(),
- symbols_path.get(),
- kSymbolsArraySize)) {
- DLOG(WARNING) << "SymGetSearchPath failed: ";
- return;
- }
-
- std::wstring new_path(std::wstring(symbols_path.get()) +
- L";" + GetExePath().DirName().value());
- if (!SymSetSearchPathW(GetCurrentProcess(), new_path.c_str())) {
- DLOG(WARNING) << "SymSetSearchPath failed.";
- return;
- }
+ SymbolContext() {
+ InitializeSymbols();
}
- DWORD init_error_;
base::Lock lock_;
DISALLOW_COPY_AND_ASSIGN(SymbolContext);
};
@@ -187,8 +193,11 @@ bool EnableInProcessStackDumping() {
// Add stack dumping support on exception on windows. Similar to OS_POSIX
// signal() handling in process_util_posix.cc.
g_previous_filter = SetUnhandledExceptionFilter(&StackDumpExceptionFilter);
- RouteStdioToConsole();
- return true;
+
+ // Need to initialize symbols early in the process or else this fails on
+ // swarming (since symbols are in different directory than in the exes) and
+ // also release x64.
+ return InitializeSymbols();
}
// Disable optimizations for the StackTrace::StackTrace function. It is
@@ -209,18 +218,12 @@ StackTrace::StackTrace() {
#pragma optimize("", on)
#endif
-StackTrace::StackTrace(const EXCEPTION_POINTERS* exception_pointers) {
- // StackWalk64() may modify context record passed to it, so we will
- // use a copy.
- CONTEXT context_record = *exception_pointers->ContextRecord;
- InitTrace(&context_record);
+StackTrace::StackTrace(EXCEPTION_POINTERS* exception_pointers) {
+ InitTrace(exception_pointers->ContextRecord);
}
-StackTrace::StackTrace(const CONTEXT* context) {
- // StackWalk64() may modify context record passed to it, so we will
- // use a copy.
- CONTEXT context_record = *context;
- InitTrace(&context_record);
+StackTrace::StackTrace(CONTEXT* context) {
+ InitTrace(context);
}
void StackTrace::InitTrace(CONTEXT* context_record) {
@@ -266,9 +269,8 @@ void StackTrace::Print() const {
void StackTrace::OutputToStream(std::ostream* os) const {
SymbolContext* context = SymbolContext::GetInstance();
- DWORD error = context->init_error();
- if (error != ERROR_SUCCESS) {
- (*os) << "Error initializing symbols (" << error
+ if (g_init_error != ERROR_SUCCESS) {
+ (*os) << "Error initializing symbols (" << g_init_error
<< "). Dumping unresolved backtrace:\n";
for (size_t i = 0; (i < count_) && os->good(); ++i) {
(*os) << "\t" << trace_[i] << "\n";
diff --git a/chromium/base/debug/task_annotator.cc b/chromium/base/debug/task_annotator.cc
index 19df8cb39e5..e47a0439e37 100644
--- a/chromium/base/debug/task_annotator.cc
+++ b/chromium/base/debug/task_annotator.cc
@@ -20,36 +20,25 @@ TaskAnnotator::~TaskAnnotator() {
void TaskAnnotator::DidQueueTask(const char* queue_function,
const PendingTask& pending_task) {
- TRACE_EVENT_FLOW_BEGIN0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
queue_function,
- TRACE_ID_MANGLE(GetTaskTraceID(pending_task)));
+ TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+ TRACE_EVENT_FLAG_FLOW_OUT);
}
void TaskAnnotator::RunTask(const char* queue_function,
- const char* run_function,
const PendingTask& pending_task) {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
tracked_objects::Duration queue_duration =
stopwatch.StartTime() - pending_task.EffectiveTimePosted();
- TRACE_EVENT_FLOW_END1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- queue_function,
- TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
- "queue_duration",
- queue_duration.InMilliseconds());
-
- // When tracing memory for posted tasks it's more valuable to attribute the
- // memory allocations to the source function than generically to the task
- // runner.
- TRACE_EVENT_WITH_MEMORY_TAG2(
- "toplevel",
- run_function,
- pending_task.posted_from.function_name(), // Name for memory tracking.
- "src_file",
- pending_task.posted_from.file_name(),
- "src_func",
- pending_task.posted_from.function_name());
+ TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ queue_function,
+ TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
+ TRACE_EVENT_FLAG_FLOW_IN,
+ "queue_duration",
+ queue_duration.InMilliseconds());
// Before running the task, store the program counter where it was posted
// and deliberately alias it to ensure it is on the stack if the task
diff --git a/chromium/base/debug/task_annotator.h b/chromium/base/debug/task_annotator.h
index aa5f17b11fe..74068d92040 100644
--- a/chromium/base/debug/task_annotator.h
+++ b/chromium/base/debug/task_annotator.h
@@ -25,11 +25,8 @@ class BASE_EXPORT TaskAnnotator {
const PendingTask& pending_task);
// Run a previously queued task. |queue_function| should match what was
- // passed into |DidQueueTask| for this task. |run_function| is used as the
- // name for the trace event that surrounds the task's execution.
- void RunTask(const char* queue_function,
- const char* run_function,
- const PendingTask& pending_task);
+ // passed into |DidQueueTask| for this task.
+ void RunTask(const char* queue_function, const PendingTask& pending_task);
private:
// Creates a process-wide unique ID to represent this task in trace events.
@@ -40,6 +37,13 @@ class BASE_EXPORT TaskAnnotator {
DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
};
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ TRACE_EVENT_WITH_MEMORY_TAG2( \
+ "toplevel", (run_function), \
+ (task).posted_from.function_name(), /* Name for memory tracking. */ \
+ "src_file", (task).posted_from.file_name(), "src_func", \
+ (task).posted_from.function_name());
+
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/task_annotator_unittest.cc b/chromium/base/debug/task_annotator_unittest.cc
index ddffc21ab02..9f5c442327e 100644
--- a/chromium/base/debug/task_annotator_unittest.cc
+++ b/chromium/base/debug/task_annotator_unittest.cc
@@ -24,8 +24,7 @@ TEST(TaskAnnotatorTest, QueueAndRunTask) {
TaskAnnotator annotator;
annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
EXPECT_EQ(0, result);
- annotator.RunTask(
- "TaskAnnotatorTest::Queue", "TaskAnnotatorTest::Run", pending_task);
+ annotator.RunTask("TaskAnnotatorTest::Queue", pending_task);
EXPECT_EQ(123, result);
}
diff --git a/chromium/base/debug_message.cc b/chromium/base/debug_message.cc
deleted file mode 100644
index 10f441d3155..00000000000
--- a/chromium/base/debug_message.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <windows.h>
-
-// Display the command line. This program is designed to be called from
-// another process to display assertions. Since the other process has
-// complete control of our command line, we assume that it did *not*
-// add the program name as the first parameter. This allows us to just
-// show the command line directly as the message.
-int APIENTRY WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
- LPSTR lpCmdLine, int nCmdShow) {
- LPWSTR cmdline = GetCommandLineW();
- MessageBox(NULL, cmdline, L"Kr\x00d8m", MB_TOPMOST);
- return 0;
-}
diff --git a/chromium/base/environment.cc b/chromium/base/environment.cc
index 245051d0c18..11b2bc39a9e 100644
--- a/chromium/base/environment.cc
+++ b/chromium/base/environment.cc
@@ -33,9 +33,9 @@ class EnvironmentImpl : public Environment {
char first_char = variable_name[0];
std::string alternate_case_var;
if (first_char >= 'a' && first_char <= 'z')
- alternate_case_var = StringToUpperASCII(std::string(variable_name));
+ alternate_case_var = ToUpperASCII(variable_name);
else if (first_char >= 'A' && first_char <= 'Z')
- alternate_case_var = StringToLowerASCII(std::string(variable_name));
+ alternate_case_var = ToLowerASCII(variable_name);
else
return false;
return GetVarImpl(alternate_case_var.c_str(), result);
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
new file mode 100644
index 00000000000..2acd2900dca
--- /dev/null
+++ b/chromium/base/feature_list.cc
@@ -0,0 +1,170 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_split.h"
+
+namespace base {
+
+namespace {
+
+// Pointer to the FeatureList instance singleton that was set via
+// FeatureList::SetInstance(). Does not use base/memory/singleton.h in order to
+// have more control over initialization timing. Leaky.
+FeatureList* g_instance = nullptr;
+
+// Splits a comma-separated string containing feature names into a vector.
+std::vector<std::string> SplitFeatureListString(const std::string& input) {
+ return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+} // namespace
+
+FeatureList::FeatureList() : initialized_(false) {}
+
+FeatureList::~FeatureList() {}
+
+void FeatureList::InitializeFromCommandLine(
+ const std::string& enable_features,
+ const std::string& disable_features) {
+ DCHECK(!initialized_);
+
+ // Process disabled features first, so that disabled ones take precedence over
+ // enabled ones (since RegisterOverride() uses insert()).
+ for (const auto& feature_name : SplitFeatureListString(disable_features)) {
+ RegisterOverride(feature_name, OVERRIDE_DISABLE_FEATURE, nullptr);
+ }
+ for (const auto& feature_name : SplitFeatureListString(enable_features)) {
+ RegisterOverride(feature_name, OVERRIDE_ENABLE_FEATURE, nullptr);
+ }
+}
+
+bool FeatureList::IsFeatureOverriddenFromCommandLine(
+ const std::string& feature_name,
+ OverrideState state) const {
+ auto it = overrides_.find(feature_name);
+ return it != overrides_.end() && it->second.overridden_state == state &&
+ !it->second.overridden_by_field_trial;
+}
+
+void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
+ OverrideState override_state,
+ FieldTrial* field_trial) {
+ DCHECK(field_trial);
+ DCHECK(!ContainsKey(overrides_, feature_name) ||
+ !overrides_.find(feature_name)->second.field_trial)
+ << "Feature " << feature_name
+ << " has conflicting field trial overrides: "
+ << overrides_.find(feature_name)->second.field_trial->trial_name()
+ << " / " << field_trial->trial_name();
+
+ RegisterOverride(feature_name, override_state, field_trial);
+}
+
+void FeatureList::AssociateReportingFieldTrial(
+ const std::string& feature_name,
+ OverrideState for_overridden_state,
+ FieldTrial* field_trial) {
+ DCHECK(
+ IsFeatureOverriddenFromCommandLine(feature_name, for_overridden_state));
+
+ // Only one associated field trial is supported per feature. This is generally
+ // enforced server-side.
+ OverrideEntry* entry = &overrides_.find(feature_name)->second;
+ if (entry->field_trial) {
+ NOTREACHED() << "Feature " << feature_name
+ << " already has trial: " << entry->field_trial->trial_name()
+ << ", associating trial: " << field_trial->trial_name();
+ return;
+ }
+
+ entry->field_trial = field_trial;
+}
+
+// static
+bool FeatureList::IsEnabled(const Feature& feature) {
+ return GetInstance()->IsFeatureEnabled(feature);
+}
+
+// static
+FeatureList* FeatureList::GetInstance() {
+ return g_instance;
+}
+
+// static
+void FeatureList::SetInstance(scoped_ptr<FeatureList> instance) {
+ DCHECK(!g_instance);
+ instance->FinalizeInitialization();
+
+ // Note: Intentional leak of global singleton.
+ g_instance = instance.release();
+}
+
+// static
+void FeatureList::ClearInstanceForTesting() {
+ delete g_instance;
+ g_instance = nullptr;
+}
+
+void FeatureList::FinalizeInitialization() {
+ DCHECK(!initialized_);
+ initialized_ = true;
+}
+
+bool FeatureList::IsFeatureEnabled(const Feature& feature) {
+ DCHECK(initialized_);
+ DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+ auto it = overrides_.find(feature.name);
+ if (it != overrides_.end()) {
+ const OverrideEntry& entry = it->second;
+
+ // Activate the corresponding field trial, if necessary.
+ if (entry.field_trial)
+ entry.field_trial->group();
+
+ // TODO(asvitkine) Expand this section as more support is added.
+ return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
+ }
+ // Otherwise, return the default state.
+ return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+}
+
+void FeatureList::RegisterOverride(const std::string& feature_name,
+ OverrideState overridden_state,
+ FieldTrial* field_trial) {
+ DCHECK(!initialized_);
+ // Note: The semantics of insert() is that it does not overwrite the entry if
+ // one already exists for the key. Thus, only the first override for a given
+ // feature name takes effect.
+ overrides_.insert(std::make_pair(
+ feature_name, OverrideEntry(overridden_state, field_trial)));
+}
+
+bool FeatureList::CheckFeatureIdentity(const Feature& feature) {
+ AutoLock auto_lock(feature_identity_tracker_lock_);
+
+ auto it = feature_identity_tracker_.find(feature.name);
+ if (it == feature_identity_tracker_.end()) {
+ // If it's not tracked yet, register it.
+ feature_identity_tracker_[feature.name] = &feature;
+ return true;
+ }
+ // Compare address of |feature| to the existing tracked entry.
+ return it->second == &feature;
+}
+
+FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
+ FieldTrial* field_trial)
+ : overridden_state(overridden_state),
+ field_trial(field_trial),
+ overridden_by_field_trial(field_trial != nullptr) {}
+
+} // namespace base
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
new file mode 100644
index 00000000000..0f91a3efa87
--- /dev/null
+++ b/chromium/base/feature_list.h
@@ -0,0 +1,209 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FEATURE_LIST_H_
+#define BASE_FEATURE_LIST_H_
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FieldTrial;
+
+// Specifies whether a given feature is enabled or disabled by default.
+enum FeatureState {
+ FEATURE_DISABLED_BY_DEFAULT,
+ FEATURE_ENABLED_BY_DEFAULT,
+};
+
+// The Feature struct is used to define the default state for a feature. See
+// comment below for more details. There must only ever be one struct instance
+// for a given feature name - generally defined as a constant global variable or
+// file static.
+struct BASE_EXPORT Feature {
+ // The name of the feature. This should be unique to each feature and is used
+ // for enabling/disabling features via command line flags and experiments.
+ const char* const name;
+
+ // The default state (i.e. enabled or disabled) for this feature.
+ const FeatureState default_state;
+};
+
+// The FeatureList class is used to determine whether a given feature is on or
+// off. It provides an authoritative answer, taking into account command-line
+// overrides and experimental control.
+//
+// The basic use case is for any feature that can be toggled (e.g. through
+// command-line or an experiment) to have a defined Feature struct, e.g.:
+//
+// struct base::Feature kMyGreatFeature {
+// "MyGreatFeature", base::FEATURE_ENABLED_BY_DEFAULT
+// };
+//
+// Then, client code that wishes to query the state of the feature would check:
+//
+// if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
+// // Feature code goes here.
+// }
+//
+// Behind the scenes, the above call would take into account any command-line
+// flags to enable or disable the feature, any experiments that may control it
+// and finally its default state (in that order of priority), to determine
+// whether the feature is on.
+//
+// Features can be explicitly forced on or off by specifying a list of comma-
+// separated feature names via the following command-line flags:
+//
+// --enable-features=Feature5,Feature7
+// --disable-features=Feature1,Feature2,Feature3
+//
+// After initialization (which should be done single-threaded), the FeatureList
+// API is thread safe.
+//
+// Note: This class is a singleton, but does not use base/memory/singleton.h in
+// order to have control over its initialization sequence. Specifically, the
+// intended use is to create an instance of this class and fully initialize it,
+// before setting it as the singleton for a process, via SetInstance().
+class BASE_EXPORT FeatureList {
+ public:
+ FeatureList();
+ ~FeatureList();
+
+ // Initializes feature overrides via command-line flags |enable_features| and
+ // |disable_features|, each of which is a comma-separated list of features to
+ // enable or disable, respectively. If a feature appears on both lists, then
+ // it will be disabled. Must only be invoked during the initialization phase
+ // (before FinalizeInitialization() has been called).
+ void InitializeFromCommandLine(const std::string& enable_features,
+ const std::string& disable_features);
+
+ // Specifies whether a feature override enables or disables the feature.
+ enum OverrideState {
+ OVERRIDE_DISABLE_FEATURE,
+ OVERRIDE_ENABLE_FEATURE,
+ };
+
+ // Returns true if the state of |feature_name| has been overridden via
+ // |InitializeFromCommandLine()|.
+ bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
+ OverrideState state) const;
+
+ // Associates a field trial for reporting purposes corresponding to the
+ // command-line setting the feature state to |for_overridden_state|. The trial
+ // will be activated when the state of the feature is first queried. This
+ // should be called during registration, after InitializeFromCommandLine() has
+ // been called but before the instance is registered via SetInstance().
+ void AssociateReportingFieldTrial(const std::string& feature_name,
+ OverrideState for_overridden_state,
+ FieldTrial* field_trial);
+
+ // Registers a field trial to override the enabled state of the specified
+ // feature to |override_state|. Command-line overrides still take precedence
+ // over field trials, so this will have no effect if the feature is being
+ // overridden from the command-line. The associated field trial will be
+ // activated when the feature state for this feature is queried. This should
+ // be called during registration, after InitializeFromCommandLine() has been
+ // called but before the instance is registered via SetInstance().
+ void RegisterFieldTrialOverride(const std::string& feature_name,
+ OverrideState override_state,
+ FieldTrial* field_trial);
+
+ // Returns whether the given |feature| is enabled. Must only be called after
+ // the singleton instance has been registered via SetInstance(). Additionally,
+ // a feature with a given name must only have a single corresponding Feature
+ // struct, which is checked in builds with DCHECKs enabled.
+ static bool IsEnabled(const Feature& feature);
+
+ // Returns the singleton instance of FeatureList. Will return null until an
+ // instance is registered via SetInstance().
+ static FeatureList* GetInstance();
+
+ // Registers the given |instance| to be the singleton feature list for this
+ // process. This should only be called once and |instance| must not be null.
+ static void SetInstance(scoped_ptr<FeatureList> instance);
+
+ // Clears the previously-registered singleton instance for tests.
+ static void ClearInstanceForTesting();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+
+ struct OverrideEntry {
+ // The overridden enable (on/off) state of the feature.
+ const OverrideState overridden_state;
+
+ // An optional associated field trial, which will be activated when the
+ // state of the feature is queried for the first time. Weak pointer to the
+ // FieldTrial object that is owned by the FieldTrialList singleton.
+ base::FieldTrial* field_trial;
+
+ // Specifies whether the feature's state is overridden by |field_trial|.
+ // If it's not, and |field_trial| is not null, it means it is simply an
+ // associated field trial for reporting purposes (and |overridden_state|
+ // came from the command-line).
+ const bool overridden_by_field_trial;
+
+ // TODO(asvitkine): Expand this as more support is added.
+
+ // Constructs an OverrideEntry for the given |overridden_state|. If
+ // |field_trial| is not null, it implies that |overridden_state| comes from
+ // the trial, so |overridden_by_field_trial| will be set to true.
+ OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
+ };
+
+ // Finalizes the initialization state of the FeatureList, so that no further
+ // overrides can be registered. This is called by SetInstance() on the
+ // singleton feature list that is being registered.
+ void FinalizeInitialization();
+
+ // Returns whether the given |feature| is enabled. This is invoked by the
+ // public FeatureList::IsEnabled() static function on the global singleton.
+ // Requires the FeatureList to have already been fully initialized.
+ bool IsFeatureEnabled(const Feature& feature);
+
+ // Registers an override for feature |feature_name|. The override specifies
+ // whether the feature should be on or off (via |overridden_state|), which
+ // will take precedence over the feature's default state. If |field_trial| is
+ // not null, registers the specified field trial object to be associated with
+ // the feature, which will activate the field trial when the feature state is
+ // queried. If an override is already registered for the given feature, it
+ // will not be changed.
+ void RegisterOverride(const std::string& feature_name,
+ OverrideState overridden_state,
+ FieldTrial* field_trial);
+
+ // Verifies that there's only a single definition of a Feature struct for a
+ // given feature name. Keeps track of the first seen Feature struct for each
+ // feature. Returns false when called on a Feature struct with a different
+ // address than the first one it saw for that feature name. Used only from
+ // DCHECKs and tests.
+ bool CheckFeatureIdentity(const Feature& feature);
+
+ // Map from feature name to an OverrideEntry struct for the feature, if it
+ // exists.
+ std::map<std::string, OverrideEntry> overrides_;
+
+ // Locked map that keeps track of seen features, to ensure a single feature is
+ // only defined once. This verification is only done in builds with DCHECKs
+ // enabled.
+ Lock feature_identity_tracker_lock_;
+ std::map<std::string, const Feature*> feature_identity_tracker_;
+
+ // Whether this object has been fully initialized. This gets set to true as a
+ // result of FinalizeInitialization().
+ bool initialized_;
+
+ DISALLOW_COPY_AND_ASSIGN(FeatureList);
+};
+
+} // namespace base
+
+#endif // BASE_FEATURE_LIST_H_
diff --git a/chromium/base/feature_list_unittest.cc b/chromium/base/feature_list_unittest.cc
new file mode 100644
index 00000000000..9d8538e9640
--- /dev/null
+++ b/chromium/base/feature_list_unittest.cc
@@ -0,0 +1,310 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include "base/format_macros.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const char kFeatureOnByDefaultName[] = "OnByDefault";
+struct Feature kFeatureOnByDefault {
+ kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+};
+
+const char kFeatureOffByDefaultName[] = "OffByDefault";
+struct Feature kFeatureOffByDefault {
+ kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
+};
+
+} // namespace
+
+class FeatureListTest : public testing::Test {
+ public:
+ FeatureListTest() : feature_list_(nullptr) {
+ RegisterFeatureListInstance(make_scoped_ptr(new FeatureList));
+ }
+ ~FeatureListTest() override { ClearFeatureListInstance(); }
+
+ void RegisterFeatureListInstance(scoped_ptr<FeatureList> feature_list) {
+ feature_list_ = feature_list.get();
+ FeatureList::SetInstance(feature_list.Pass());
+ }
+ void ClearFeatureListInstance() {
+ FeatureList::ClearInstanceForTesting();
+ feature_list_ = nullptr;
+ }
+
+ FeatureList* feature_list() { return feature_list_; }
+
+ private:
+ // Weak. Owned by the FeatureList::SetInstance().
+ FeatureList* feature_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(FeatureListTest);
+};
+
+TEST_F(FeatureListTest, DefaultStates) {
+ EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+ EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine) {
+ struct {
+ const char* enable_features;
+ const char* disable_features;
+ bool expected_feature_on_state;
+ bool expected_feature_off_state;
+ } test_cases[] = {
+ {"", "", true, false},
+ {"OffByDefault", "", true, true},
+ {"OffByDefault", "OnByDefault", false, true},
+ {"OnByDefault,OffByDefault", "", true, true},
+ {"", "OnByDefault,OffByDefault", false, false},
+ // In the case an entry is both, disable takes precedence.
+ {"OnByDefault", "OnByDefault,OffByDefault", false, false},
+ };
+
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ const auto& test_case = test_cases[i];
+ SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+ test_case.enable_features,
+ test_case.disable_features));
+
+ ClearFeatureListInstance();
+ scoped_ptr<FeatureList> feature_list(new FeatureList);
+ feature_list->InitializeFromCommandLine(test_case.enable_features,
+ test_case.disable_features);
+ RegisterFeatureListInstance(feature_list.Pass());
+
+ EXPECT_EQ(test_case.expected_feature_on_state,
+ FeatureList::IsEnabled(kFeatureOnByDefault))
+ << i;
+ EXPECT_EQ(test_case.expected_feature_off_state,
+ FeatureList::IsEnabled(kFeatureOffByDefault))
+ << i;
+ }
+}
+
+TEST_F(FeatureListTest, CheckFeatureIdentity) {
+ // Tests that CheckFeatureIdentity() correctly detects when two different
+ // structs with the same feature name are passed to it.
+
+ // Call it twice for each feature at the top of the file, since the first call
+ // makes it remember the entry and the second call will verify it.
+ EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+ EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+ EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+ EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+
+ // Now, call it with a distinct struct for |kFeatureOnByDefaultName|, which
+ // should return false.
+ struct Feature kFeatureOnByDefault2 {
+ kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+ };
+ EXPECT_FALSE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault2));
+}
+
+TEST_F(FeatureListTest, FieldTrialOverrides) {
+ struct {
+ FeatureList::OverrideState trial1_state;
+ FeatureList::OverrideState trial2_state;
+ } test_cases[] = {
+ {FeatureList::OVERRIDE_DISABLE_FEATURE,
+ FeatureList::OVERRIDE_DISABLE_FEATURE},
+ {FeatureList::OVERRIDE_DISABLE_FEATURE,
+ FeatureList::OVERRIDE_ENABLE_FEATURE},
+ {FeatureList::OVERRIDE_ENABLE_FEATURE,
+ FeatureList::OVERRIDE_DISABLE_FEATURE},
+ {FeatureList::OVERRIDE_ENABLE_FEATURE,
+ FeatureList::OVERRIDE_ENABLE_FEATURE},
+ };
+
+ FieldTrial::ActiveGroup active_group;
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ const auto& test_case = test_cases[i];
+ SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]", i));
+
+ ClearFeatureListInstance();
+
+ FieldTrialList field_trial_list(nullptr);
+ scoped_ptr<FeatureList> feature_list(new FeatureList);
+
+ FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+ FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+ feature_list->RegisterFieldTrialOverride(kFeatureOnByDefaultName,
+ test_case.trial1_state, trial1);
+ feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+ test_case.trial2_state, trial2);
+ RegisterFeatureListInstance(feature_list.Pass());
+
+ // Initially, neither trial should be active.
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+ const bool expected_enabled_1 =
+ (test_case.trial1_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+ EXPECT_EQ(expected_enabled_1, FeatureList::IsEnabled(kFeatureOnByDefault));
+ // The above should have activated |trial1|.
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+ const bool expected_enabled_2 =
+ (test_case.trial2_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+ EXPECT_EQ(expected_enabled_2, FeatureList::IsEnabled(kFeatureOffByDefault));
+ // The above should have activated |trial2|.
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+ }
+}
+
+TEST_F(FeatureListTest, CommandLineTakesPrecedenceOverFieldTrial) {
+ ClearFeatureListInstance();
+
+ FieldTrialList field_trial_list(nullptr);
+ scoped_ptr<FeatureList> feature_list(new FeatureList);
+
+ // The feature is explicitly enabled on the command-line.
+ feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+ // But the FieldTrial would set the feature to disabled.
+ FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample2", "A");
+ feature_list->RegisterFieldTrialOverride(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, trial);
+ RegisterFeatureListInstance(feature_list.Pass());
+
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+ // Command-line should take precedence.
+ EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+ // Since the feature is on due to the command-line, and not as a result of the
+ // field trial, the field trial should not be activated (since the Associate*
+ // API wasn't used.)
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+}
+
+TEST_F(FeatureListTest, IsFeatureOverriddenFromCommandLine) {
+ ClearFeatureListInstance();
+
+ FieldTrialList field_trial_list(nullptr);
+ scoped_ptr<FeatureList> feature_list(new FeatureList);
+
+ // No features are overridden from the command line yet
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+ // Now, enable |kFeatureOffByDefaultName| via the command-line.
+ feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+ // It should now be overridden for the enabled group.
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+ EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+ // Register a field trial to associate with the feature and ensure that the
+ // results are still the same.
+ feature_list->AssociateReportingFieldTrial(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+ FieldTrialList::CreateFieldTrial("Trial1", "A"));
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+ EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+ // Now, register a field trial to override |kFeatureOnByDefaultName| state
+ // and check that the function still returns false for that feature.
+ feature_list->RegisterFieldTrialOverride(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+ FieldTrialList::CreateFieldTrial("Trial2", "A"));
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+ EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+ RegisterFeatureListInstance(feature_list.Pass());
+
+ // Check the expected feature states for good measure.
+ EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+ EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+}
+
+TEST_F(FeatureListTest, AssociateReportingFieldTrial) {
+ struct {
+ const char* enable_features;
+ const char* disable_features;
+ bool expected_enable_trial_created;
+ bool expected_disable_trial_created;
+ } test_cases[] = {
+ // If no enable/disable flags are specified, no trials should be created.
+ {"", "", false, false},
+ // Enabling the feature should result in the enable trial created.
+ {kFeatureOffByDefaultName, "", true, false},
+ // Disabling the feature should result in the disable trial created.
+ {"", kFeatureOffByDefaultName, false, true},
+ };
+
+ const char kTrialName[] = "ForcingTrial";
+ const char kForcedOnGroupName[] = "ForcedOn";
+ const char kForcedOffGroupName[] = "ForcedOff";
+
+ for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ const auto& test_case = test_cases[i];
+ SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+ test_case.enable_features,
+ test_case.disable_features));
+
+ ClearFeatureListInstance();
+
+ FieldTrialList field_trial_list(nullptr);
+ scoped_ptr<FeatureList> feature_list(new FeatureList);
+ feature_list->InitializeFromCommandLine(test_case.enable_features,
+ test_case.disable_features);
+
+ FieldTrial* enable_trial = nullptr;
+ if (feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE)) {
+ enable_trial = base::FieldTrialList::CreateFieldTrial(kTrialName,
+ kForcedOnGroupName);
+ feature_list->AssociateReportingFieldTrial(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+ enable_trial);
+ }
+ FieldTrial* disable_trial = nullptr;
+ if (feature_list->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+ disable_trial = base::FieldTrialList::CreateFieldTrial(
+ kTrialName, kForcedOffGroupName);
+ feature_list->AssociateReportingFieldTrial(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+ disable_trial);
+ }
+ EXPECT_EQ(test_case.expected_enable_trial_created, enable_trial != nullptr);
+ EXPECT_EQ(test_case.expected_disable_trial_created,
+ disable_trial != nullptr);
+ RegisterFeatureListInstance(feature_list.Pass());
+
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+ if (disable_trial) {
+ EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+ EXPECT_EQ(kForcedOffGroupName, disable_trial->group_name());
+ } else if (enable_trial) {
+ EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+ EXPECT_EQ(kForcedOnGroupName, enable_trial->group_name());
+ }
+ }
+}
+
+} // namespace base
diff --git a/chromium/base/file_descriptor_posix.h b/chromium/base/file_descriptor_posix.h
index 376ad39e307..2a366116a31 100644
--- a/chromium/base/file_descriptor_posix.h
+++ b/chromium/base/file_descriptor_posix.h
@@ -14,9 +14,16 @@ namespace base {
// We introduct a special structure for file descriptors in order that we are
// able to use template specialisation to special-case their handling.
//
-// WARNING: (Chromium only) There are subtleties to consider if serialising
-// these objects over IPC. See comments in ipc/ipc_message_utils.h
-// above the template specialisation for this structure.
+// IMPORTANT: This is primarily intended for use when sending file descriptors
+// over IPC. Even if |auto_close| is true, base::FileDescriptor does NOT close()
+// |fd| when going out of scope. Instead, a consumer of a base::FileDescriptor
+// must invoke close() on |fd| if |auto_close| is true.
+//
+// In the case of IPC, the IPC subsystem knows to close() |fd| after sending
+// a message that contains a base::FileDescriptor if auto_close == true. On the
+// other end, the receiver must make sure to close() |fd| after it has finished
+// processing the IPC message. See the IPC::ParamTraits<> specialization in
+// ipc/ipc_message_utils.h for all the details.
// -----------------------------------------------------------------------------
struct FileDescriptor {
FileDescriptor() : fd(-1), auto_close(false) {}
diff --git a/chromium/base/file_version_info.h b/chromium/base/file_version_info.h
index 57b837c24b0..8c1bf92438e 100644
--- a/chromium/base/file_version_info.h
+++ b/chromium/base/file_version_info.h
@@ -32,6 +32,17 @@ class FilePath;
// version returns values from the Info.plist as appropriate. TODO(avi): make
// this a less-obvious Windows-ism.
+#if defined(OS_WIN)
+// Creates a FileVersionInfo for the current module. Returns NULL in case of
+// error. The returned object should be deleted when you are done with it. This
+// is done as a macro to force inlining of __ImageBase. It used to be inside of
+// a method labeled with __forceinline, but inlining through __forceinline
+// stopped working for Debug builds in VS2013 (http://crbug.com/516359).
+#define CREATE_FILE_VERSION_INFO_FOR_CURRENT_MODULE() \
+ FileVersionInfo::CreateFileVersionInfoForModule( \
+ reinterpret_cast<HMODULE>(&__ImageBase))
+#endif
+
class BASE_EXPORT FileVersionInfo {
public:
virtual ~FileVersionInfo() {}
@@ -46,17 +57,9 @@ class BASE_EXPORT FileVersionInfo {
#if defined(OS_WIN)
// Creates a FileVersionInfo for the specified module. Returns NULL in case
// of error. The returned object should be deleted when you are done with it.
+ // See CREATE_FILE_VERSION_INFO_FOR_CURRENT_MODULE() helper above for a
+ // CreateFileVersionInfoForCurrentModule() alternative for Windows.
static FileVersionInfo* CreateFileVersionInfoForModule(HMODULE module);
-
- // Creates a FileVersionInfo for the current module. Returns NULL in case
- // of error. The returned object should be deleted when you are done with it.
- // This function should be inlined so that the "current module" is evaluated
- // correctly, instead of being the module that contains base.
- __forceinline static FileVersionInfo*
- CreateFileVersionInfoForCurrentModule() {
- HMODULE module = reinterpret_cast<HMODULE>(&__ImageBase);
- return CreateFileVersionInfoForModule(module);
- }
#else
// Creates a FileVersionInfo for the current module. Returns NULL in case
// of error. The returned object should be deleted when you are done with it.
diff --git a/chromium/base/files/OWNERS b/chromium/base/files/OWNERS
deleted file mode 100644
index b99e8a2fc7a..00000000000
--- a/chromium/base/files/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-rvargas@chromium.org
-
-per-file file_path_watcher*=mnissler@chromium.org
diff --git a/chromium/base/files/dir_reader_posix_unittest.cc b/chromium/base/files/dir_reader_posix_unittest.cc
index 0685031a982..2e181b3d851 100644
--- a/chromium/base/files/dir_reader_posix_unittest.cc
+++ b/chromium/base/files/dir_reader_posix_unittest.cc
@@ -10,6 +10,7 @@
#include <string.h>
#include <unistd.h>
+#include "base/files/scoped_temp_dir.h"
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -25,8 +26,9 @@ TEST(DirReaderPosixUnittest, Read) {
if (DirReaderPosix::IsFallback())
return;
- char kDirTemplate[] = "/tmp/org.chromium.dir-reader-posix-XXXXXX";
- const char* dir = mkdtemp(kDirTemplate);
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ const char* dir = temp_dir.path().value().c_str();
ASSERT_TRUE(dir);
const int prev_wd = open(".", O_RDONLY | O_DIRECTORY);
diff --git a/chromium/base/files/file.h b/chromium/base/files/file.h
index cba43536356..976188b232e 100644
--- a/chromium/base/files/file.h
+++ b/chromium/base/files/file.h
@@ -21,7 +21,6 @@
#include "base/files/file_path.h"
#include "base/files/file_tracing.h"
#include "base/files/scoped_file.h"
-#include "base/gtest_prod_util.h"
#include "base/move.h"
#include "base/time/time.h"
@@ -29,8 +28,6 @@
#include "base/win/scoped_handle.h"
#endif
-FORWARD_DECLARE_TEST(FileTest, MemoryCorruption);
-
namespace base {
#if defined(OS_WIN)
@@ -306,55 +303,8 @@ class BASE_EXPORT File {
static std::string ErrorToString(Error error);
private:
- FRIEND_TEST_ALL_PREFIXES(::FileTest, MemoryCorruption);
-
friend class FileTracing::ScopedTrace;
-#if defined(OS_POSIX)
- // Encloses a single ScopedFD, saving a cheap tamper resistent memory checksum
- // alongside it. This checksum is validated at every access, allowing early
- // detection of memory corruption.
-
- // TODO(gavinp): This is in place temporarily to help us debug
- // https://crbug.com/424562 , which can't be reproduced in valgrind. Remove
- // this code after we have fixed this issue.
- class MemoryCheckingScopedFD {
- public:
- MemoryCheckingScopedFD();
- MemoryCheckingScopedFD(int fd);
- ~MemoryCheckingScopedFD();
-
- bool is_valid() const { Check(); return file_.is_valid(); }
- int get() const { Check(); return file_.get(); }
-
- void reset() { Check(); file_.reset(); UpdateChecksum(); }
- void reset(int fd) { Check(); file_.reset(fd); UpdateChecksum(); }
- int release() {
- Check();
- int fd = file_.release();
- UpdateChecksum();
- return fd;
- }
-
- private:
- FRIEND_TEST_ALL_PREFIXES(::FileTest, MemoryCorruption);
-
- // Computes the checksum for the current value of |file_|. Returns via an
- // out parameter to guard against implicit conversions of unsigned integral
- // types.
- void ComputeMemoryChecksum(unsigned int* out_checksum) const;
-
- // Confirms that the current |file_| and |file_memory_checksum_| agree,
- // failing a CHECK if they do not.
- void Check() const;
-
- void UpdateChecksum();
-
- ScopedFD file_;
- unsigned int file_memory_checksum_;
- };
-#endif
-
// Creates or opens the given file. Only called if |path| has no
// traversal ('..') components.
void DoInitialize(const FilePath& path, uint32 flags);
@@ -368,7 +318,7 @@ class BASE_EXPORT File {
#if defined(OS_WIN)
win::ScopedHandle file_;
#elif defined(OS_POSIX)
- MemoryCheckingScopedFD file_;
+ ScopedFD file_;
#endif
// A path to use for tracing purposes. Set if file tracing is enabled during
@@ -386,3 +336,4 @@ class BASE_EXPORT File {
} // namespace base
#endif // BASE_FILES_FILE_H_
+
diff --git a/chromium/base/files/file_enumerator_win.cc b/chromium/base/files/file_enumerator_win.cc
index ae41a4600d3..90db7f5729e 100644
--- a/chromium/base/files/file_enumerator_win.cc
+++ b/chromium/base/files/file_enumerator_win.cc
@@ -30,7 +30,8 @@ int64 FileEnumerator::FileInfo::GetSize() const {
ULARGE_INTEGER size;
size.HighPart = find_data_.nFileSizeHigh;
size.LowPart = find_data_.nFileSizeLow;
- DCHECK_LE(size.QuadPart, std::numeric_limits<int64>::max());
+ DCHECK_LE(size.QuadPart,
+ static_cast<ULONGLONG>(std::numeric_limits<int64>::max()));
return static_cast<int64>(size.QuadPart);
}
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index 92123533aaa..18775ed9ca9 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -10,9 +10,6 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/pickle.h"
-
-// These includes are just for the *Hack functions, and should be removed
-// when those functions are removed.
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
@@ -1259,11 +1256,12 @@ int FilePath::CompareIgnoreCase(StringPieceType string1,
#else // << WIN. MACOSX | other (POSIX) >>
-// Generic (POSIX) implementation of file string comparison.
-// TODO(rolandsteiner) check if this is sufficient/correct.
+// Generic Posix system comparisons.
int FilePath::CompareIgnoreCase(StringPieceType string1,
StringPieceType string2) {
- int comparison = strcasecmp(string1.data(), string2.data());
+  // Specifically need null terminated strings for this API call.
+ int comparison = strcasecmp(string1.as_string().c_str(),
+ string2.as_string().c_str());
if (comparison < 0)
return -1;
if (comparison > 0)
@@ -1316,7 +1314,7 @@ FilePath FilePath::NormalizePathSeparatorsTo(CharType separator) const {
#if defined(OS_ANDROID)
bool FilePath::IsContentUri() const {
- return StartsWithASCII(path_, "content://", false /*case_sensitive*/);
+ return StartsWith(path_, "content://", base::CompareCase::INSENSITIVE_ASCII);
}
#endif
diff --git a/chromium/base/files/file_path_unittest.cc b/chromium/base/files/file_path_unittest.cc
index 60eaa8f002f..bc0e8432e0e 100644
--- a/chromium/base/files/file_path_unittest.cc
+++ b/chromium/base/files/file_path_unittest.cc
@@ -10,6 +10,10 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
+#if defined(OS_POSIX)
+#include "base/test/scoped_locale.h"
+#endif
+
// This macro helps avoid wrapped lines in the test structs.
#define FPL(x) FILE_PATH_LITERAL(x)
@@ -1126,6 +1130,10 @@ TEST_F(FilePathTest, FromUTF8Unsafe_And_AsUTF8Unsafe) {
"\xEF\xBC\xA1\xEF\xBC\xA2\xEF\xBC\xA3.txt" },
};
+#if !defined(SYSTEM_NATIVE_UTF8) && defined(OS_LINUX)
+ ScopedLocale locale("en_US.UTF-8");
+#endif
+
for (size_t i = 0; i < arraysize(cases); ++i) {
// Test FromUTF8Unsafe() works.
FilePath from_utf8 = FilePath::FromUTF8Unsafe(cases[i].utf8);
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index ba2f1d96c84..6dfc0a6403e 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -28,6 +28,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/synchronization/lock.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
@@ -167,9 +168,8 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
void RemoveRecursiveWatches();
// |path| is a symlink to a non-existent target. Attempt to add a watch to
- // the link target's parent directory. Returns true and update |watch_entry|
- // on success.
- bool AddWatchForBrokenSymlink(const FilePath& path, WatchEntry* watch_entry);
+ // the link target's parent directory. Update |watch_entry| on success.
+ void AddWatchForBrokenSymlink(const FilePath& path, WatchEntry* watch_entry);
bool HasValidWatchVector() const;
@@ -513,21 +513,19 @@ void FilePathWatcherImpl::UpdateWatches() {
// Walk the list of watches and update them as we go.
FilePath path(FILE_PATH_LITERAL("/"));
- bool path_valid = true;
for (size_t i = 0; i < watches_.size(); ++i) {
WatchEntry& watch_entry = watches_[i];
InotifyReader::Watch old_watch = watch_entry.watch;
watch_entry.watch = InotifyReader::kInvalidWatch;
watch_entry.linkname.clear();
- if (path_valid) {
- watch_entry.watch = g_inotify_reader.Get().AddWatch(path, this);
- if (watch_entry.watch == InotifyReader::kInvalidWatch) {
- if (IsLink(path)) {
- path_valid = AddWatchForBrokenSymlink(path, &watch_entry);
- } else {
- path_valid = false;
- }
- }
+ watch_entry.watch = g_inotify_reader.Get().AddWatch(path, this);
+ if (watch_entry.watch == InotifyReader::kInvalidWatch) {
+ // Ignore the error code (beyond symlink handling) to attempt to add
+ // watches on accessible children of unreadable directories. Note that
+ // this is a best-effort attempt; we may not catch events in this
+ // scenario.
+ if (IsLink(path))
+ AddWatchForBrokenSymlink(path, &watch_entry);
}
if (old_watch != watch_entry.watch)
g_inotify_reader.Get().RemoveWatch(old_watch, this);
@@ -643,12 +641,12 @@ void FilePathWatcherImpl::RemoveRecursiveWatches() {
recursive_watches_by_path_.clear();
}
-bool FilePathWatcherImpl::AddWatchForBrokenSymlink(const FilePath& path,
+void FilePathWatcherImpl::AddWatchForBrokenSymlink(const FilePath& path,
WatchEntry* watch_entry) {
DCHECK_EQ(InotifyReader::kInvalidWatch, watch_entry->watch);
FilePath link;
if (!ReadSymbolicLink(path, &link))
- return false;
+ return;
if (!link.IsAbsolute())
link = path.DirName().Append(link);
@@ -664,11 +662,10 @@ bool FilePathWatcherImpl::AddWatchForBrokenSymlink(const FilePath& path,
// exist. Ideally we should make sure we've watched all the components of
// the symlink path for changes. See crbug.com/91561 for details.
DPLOG(WARNING) << "Watch failed for " << link.DirName().value();
- return false;
+ return;
}
watch_entry->watch = watch;
watch_entry->linkname = link.BaseName().value();
- return true;
}
bool FilePathWatcherImpl::HasValidWatchVector() const {
diff --git a/chromium/base/files/file_path_watcher_win.cc b/chromium/base/files/file_path_watcher_win.cc
index 081698f8df1..3f37cec51c1 100644
--- a/chromium/base/files/file_path_watcher_win.cc
+++ b/chromium/base/files/file_path_watcher_win.cc
@@ -106,7 +106,7 @@ bool FilePathWatcherImpl::Watch(const FilePath& path,
if (!UpdateWatch())
return false;
- watcher_.StartWatching(handle_, this);
+ watcher_.StartWatchingOnce(handle_, this);
return true;
}
@@ -198,7 +198,7 @@ void FilePathWatcherImpl::OnObjectSignaled(HANDLE object) {
// The watch may have been cancelled by the callback.
if (handle_ != INVALID_HANDLE_VALUE)
- watcher_.StartWatching(handle_, this);
+ watcher_.StartWatchingOnce(handle_, this);
}
// static
diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc
index 7fb617c3f9d..a5aee01aa29 100644
--- a/chromium/base/files/file_posix.cc
+++ b/chromium/base/files/file_posix.cc
@@ -420,49 +420,6 @@ File::Error File::OSErrorToFileError(int saved_errno) {
}
}
-File::MemoryCheckingScopedFD::MemoryCheckingScopedFD() {
- UpdateChecksum();
-}
-
-File::MemoryCheckingScopedFD::MemoryCheckingScopedFD(int fd) : file_(fd) {
- UpdateChecksum();
-}
-
-File::MemoryCheckingScopedFD::~MemoryCheckingScopedFD() {}
-
-// static
-void File::MemoryCheckingScopedFD::ComputeMemoryChecksum(
- unsigned int* out_checksum) const {
- // Use a single iteration of a linear congruentional generator (lcg) to
- // provide a cheap checksum unlikely to be accidentally matched by a random
- // memory corruption.
-
- // By choosing constants that satisfy the Hull-Duebell Theorem on lcg cycle
- // length, we insure that each distinct fd value maps to a distinct checksum,
- // which maximises the utility of our checksum.
-
- // This code uses "unsigned int" throughout for its defined modular semantics,
- // which implicitly gives us a divisor that is a power of two.
-
- const unsigned int kMultiplier = 13035 * 4 + 1;
- COMPILE_ASSERT(((kMultiplier - 1) & 3) == 0, pred_must_be_multiple_of_four);
- const unsigned int kIncrement = 1595649551;
- COMPILE_ASSERT(kIncrement & 1, must_be_coprime_to_powers_of_two);
-
- *out_checksum =
- static_cast<unsigned int>(file_.get()) * kMultiplier + kIncrement;
-}
-
-void File::MemoryCheckingScopedFD::Check() const {
- unsigned int computed_checksum;
- ComputeMemoryChecksum(&computed_checksum);
- CHECK_EQ(file_memory_checksum_, computed_checksum) << "corrupted fd memory";
-}
-
-void File::MemoryCheckingScopedFD::UpdateChecksum() {
- ComputeMemoryChecksum(&file_memory_checksum_);
-}
-
// NaCl doesn't implement system calls to open files directly.
#if !defined(OS_NACL)
// TODO(erikkay): does it make sense to support FLAG_EXCLUSIVE_* here?
diff --git a/chromium/base/files/file_proxy_unittest.cc b/chromium/base/files/file_proxy_unittest.cc
index df0bbc869c2..efe5c924299 100644
--- a/chromium/base/files/file_proxy_unittest.cc
+++ b/chromium/base/files/file_proxy_unittest.cc
@@ -287,7 +287,13 @@ TEST_F(FileProxyTest, WriteAndFlush) {
}
}
-TEST_F(FileProxyTest, SetTimes) {
+#if defined(OS_ANDROID)
+// Flaky on Android, see http://crbug.com/489602
+#define MAYBE_SetTimes DISABLED_SetTimes
+#else
+#define MAYBE_SetTimes SetTimes
+#endif
+TEST_F(FileProxyTest, MAYBE_SetTimes) {
FileProxy proxy(file_task_runner());
CreateProxy(
File::FLAG_CREATE | File::FLAG_WRITE | File::FLAG_WRITE_ATTRIBUTES,
diff --git a/chromium/base/files/file_tracing.h b/chromium/base/files/file_tracing.h
index 92324c9475a..d37c21d9ed4 100644
--- a/chromium/base/files/file_tracing.h
+++ b/chromium/base/files/file_tracing.h
@@ -30,6 +30,8 @@ class BASE_EXPORT FileTracing {
class Provider {
public:
+ virtual ~Provider() = default;
+
// Whether the file tracing category is currently enabled.
virtual bool FileTracingCategoryIsEnabled() const = 0;
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index 5c594242bc8..67dbbfd1ec8 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -5,7 +5,6 @@
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
-#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -495,7 +494,7 @@ TEST(FileTest, GetInfoForDirectory) {
base::File dir(
::CreateFile(empty_dir.value().c_str(),
- FILE_ALL_ACCESS,
+ GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL,
OPEN_EXISTING,
@@ -510,71 +509,3 @@ TEST(FileTest, GetInfoForDirectory) {
EXPECT_EQ(0, info.size);
}
#endif // defined(OS_WIN)
-
-#if defined(OS_POSIX) && defined(GTEST_HAS_DEATH_TEST)
-TEST(FileTest, MemoryCorruption) {
- {
- // Test that changing the checksum value is detected.
- base::File file;
- EXPECT_NE(file.file_.file_memory_checksum_,
- implicit_cast<unsigned int>(file.GetPlatformFile()));
- file.file_.file_memory_checksum_ = file.GetPlatformFile();
- EXPECT_DEATH(file.IsValid(), "");
-
- file.file_.UpdateChecksum(); // Do not crash on File::~File().
- }
-
- {
- // Test that changing the file descriptor value is detected.
- base::File file;
- file.file_.file_.reset(17);
- EXPECT_DEATH(file.IsValid(), "");
-
- // Do not crash on File::~File().
- ignore_result(file.file_.file_.release());
- file.file_.UpdateChecksum();
- }
-
- {
- // Test that GetPlatformFile() checks for corruption.
- base::File file;
- file.file_.file_memory_checksum_ = file.GetPlatformFile();
- EXPECT_DEATH(file.GetPlatformFile(), "");
-
- file.file_.UpdateChecksum(); // Do not crash on File::~File().
- }
-
- {
- // Test that the base::File destructor checks for corruption.
- scoped_ptr<base::File> file(new File());
- file->file_.file_memory_checksum_ = file->GetPlatformFile();
- EXPECT_DEATH(file.reset(), "");
-
- // Do not crash on this thread's destructor call.
- file->file_.UpdateChecksum();
- }
-
- {
- // Test that the base::File constructor checks for corruption.
- base::File file;
- file.file_.file_memory_checksum_ = file.GetPlatformFile();
- EXPECT_DEATH(File f(file.Pass()), "");
-
- file.file_.UpdateChecksum(); // Do not crash on File::~File().
- }
-
- {
- // Test that doing IO checks for corruption.
- base::File file;
- file.file_.file_.reset(17); // A fake open FD value.
-
- EXPECT_DEATH(file.Seek(File::FROM_BEGIN, 0), "");
- EXPECT_DEATH(file.Read(0, NULL, 0), "");
- EXPECT_DEATH(file.ReadAtCurrentPos(NULL, 0), "");
- EXPECT_DEATH(file.Write(0, NULL, 0), "");
-
- ignore_result(file.file_.file_.release());
- file.file_.UpdateChecksum();
- }
-}
-#endif // defined(OS_POSIX)
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index a8c5d44f9c7..ffa79a45f42 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -24,8 +24,6 @@
#if defined(OS_MACOSX)
#include <AvailabilityMacros.h>
#include "base/mac/foundation_util.h"
-#elif !defined(OS_CHROMEOS) && defined(USE_GLIB)
-#include <glib.h> // for g_get_home_dir()
#endif
#include "base/basictypes.h"
@@ -478,16 +476,6 @@ FilePath GetHomeDir() {
#if defined(OS_ANDROID)
DLOG(WARNING) << "OS_ANDROID: Home directory lookup not yet implemented.";
-#elif defined(USE_GLIB) && !defined(OS_CHROMEOS)
- // g_get_home_dir calls getpwent, which can fall through to LDAP calls so
- // this may do I/O. However, it should be rare that $HOME is not defined and
- // this is typically called from the path service which has no threading
- // restrictions. The path service will cache the result which limits the
- // badness of blocking on I/O. As a result, we don't have a thread
- // restriction here.
- home_dir = g_get_home_dir();
- if (home_dir && home_dir[0])
- return FilePath(home_dir);
#endif
FilePath rv;
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index 52581f8ce43..933cb7f46e5 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -134,7 +134,7 @@ class ReparsePoint {
ReparsePoint(const FilePath& source, const FilePath& target) {
dir_.Set(
::CreateFile(source.value().c_str(),
- FILE_ALL_ACCESS,
+ GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL,
OPEN_EXISTING,
@@ -244,15 +244,6 @@ std::wstring ReadTextFile(const FilePath& filename) {
return std::wstring(contents);
}
-#if defined(OS_WIN)
-uint64 FileTimeAsUint64(const FILETIME& ft) {
- ULARGE_INTEGER u;
- u.LowPart = ft.dwLowDateTime;
- u.HighPart = ft.dwHighDateTime;
- return u.QuadPart;
-}
-#endif
-
TEST_F(FileUtilTest, FileAndDirectorySize) {
// Create three files of 20, 30 and 3 chars (utf8). ComputeDirectorySize
// should return 53 bytes.
@@ -435,8 +426,8 @@ TEST_F(FileUtilTest, NormalizeFilePathReparsePoints) {
TEST_F(FileUtilTest, DevicePathToDriveLetter) {
// Get a drive letter.
- std::wstring real_drive_letter = temp_dir_.path().value().substr(0, 2);
- StringToUpperASCII(&real_drive_letter);
+ string16 real_drive_letter =
+ ToUpperASCII(temp_dir_.path().value().substr(0, 2));
if (!isalpha(real_drive_letter[0]) || ':' != real_drive_letter[1]) {
LOG(ERROR) << "Can't get a drive letter to test with.";
return;
diff --git a/chromium/base/files/important_file_writer.cc b/chromium/base/files/important_file_writer.cc
index 814fc7b9add..1529107bdf3 100644
--- a/chromium/base/files/important_file_writer.cc
+++ b/chromium/base/files/important_file_writer.cc
@@ -16,6 +16,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/task_runner.h"
@@ -47,8 +48,7 @@ void LogFailure(const FilePath& path, TempFileFailure failure_code,
const std::string& message) {
UMA_HISTOGRAM_ENUMERATION("ImportantFile.TempFileFailures", failure_code,
TEMP_FILE_FAILURE_MAX);
- DPLOG(WARNING) << "temp file failure: " << path.value().c_str()
- << " : " << message;
+ DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message;
}
// Helper function to call WriteFileAtomically() with a scoped_ptr<std::string>.
@@ -72,16 +72,16 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
char path[128];
} file_info;
file_info.data_size = data.size();
- base::strlcpy(file_info.path, path.value().c_str(),
- arraysize(file_info.path));
- base::debug::Alias(&file_info);
+ strlcpy(file_info.path, path.value().c_str(), arraysize(file_info.path));
+ debug::Alias(&file_info);
#endif
+
// Write the data to a temp file then rename to avoid data loss if we crash
// while writing the file. Ensure that the temp file is on the same volume
// as target file, so it can be moved in one step, and that the temp file
// is securely created.
FilePath tmp_file_path;
- if (!base::CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) {
+ if (!CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) {
LogFailure(path, FAILED_CREATING, "could not create temporary file");
return false;
}
@@ -92,29 +92,28 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
return false;
}
- // If this happens in the wild something really bad is going on.
- CHECK_LE(data.length(), static_cast<size_t>(kint32max));
- int bytes_written = tmp_file.Write(0, data.data(),
- static_cast<int>(data.length()));
+ // If this fails in the wild, something really bad is going on.
+ const int data_length = checked_cast<int32_t>(data.length());
+ int bytes_written = tmp_file.Write(0, data.data(), data_length);
bool flush_success = tmp_file.Flush();
tmp_file.Close();
- if (bytes_written < static_cast<int>(data.length())) {
+ if (bytes_written < data_length) {
LogFailure(path, FAILED_WRITING, "error writing, bytes_written=" +
IntToString(bytes_written));
- base::DeleteFile(tmp_file_path, false);
+ DeleteFile(tmp_file_path, false);
return false;
}
if (!flush_success) {
LogFailure(path, FAILED_FLUSHING, "error flushing");
- base::DeleteFile(tmp_file_path, false);
+ DeleteFile(tmp_file_path, false);
return false;
}
- if (!base::ReplaceFile(tmp_file_path, path, NULL)) {
+ if (!ReplaceFile(tmp_file_path, path, nullptr)) {
LogFailure(path, FAILED_RENAMING, "could not rename temporary file");
- base::DeleteFile(tmp_file_path, false);
+ DeleteFile(tmp_file_path, false);
return false;
}
@@ -123,11 +122,21 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
ImportantFileWriter::ImportantFileWriter(
const FilePath& path,
- const scoped_refptr<base::SequencedTaskRunner>& task_runner)
+ const scoped_refptr<SequencedTaskRunner>& task_runner)
+ : ImportantFileWriter(
+ path,
+ task_runner,
+ TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)) {
+}
+
+ImportantFileWriter::ImportantFileWriter(
+ const FilePath& path,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ TimeDelta interval)
: path_(path),
task_runner_(task_runner),
- serializer_(NULL),
- commit_interval_(TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)),
+ serializer_(nullptr),
+ commit_interval_(interval),
weak_factory_(this) {
DCHECK(CalledOnValidThread());
DCHECK(task_runner_);
@@ -147,7 +156,7 @@ bool ImportantFileWriter::HasPendingWrite() const {
void ImportantFileWriter::WriteNow(scoped_ptr<std::string> data) {
DCHECK(CalledOnValidThread());
- if (data->length() > static_cast<size_t>(kint32max)) {
+ if (!IsValueInRangeForNumericType<int32_t>(data->length())) {
NOTREACHED();
return;
}
@@ -185,13 +194,13 @@ void ImportantFileWriter::DoScheduledWrite() {
WriteNow(data.Pass());
} else {
DLOG(WARNING) << "failed to serialize data to be saved in "
- << path_.value().c_str();
+ << path_.value();
}
- serializer_ = NULL;
+ serializer_ = nullptr;
}
void ImportantFileWriter::RegisterOnNextSuccessfulWriteCallback(
- const base::Closure& on_next_successful_write) {
+ const Closure& on_next_successful_write) {
DCHECK(on_next_successful_write_.is_null());
on_next_successful_write_ = on_next_successful_write;
}
@@ -203,7 +212,7 @@ bool ImportantFileWriter::PostWriteTask(const Callback<bool()>& task) {
// suppressing all of those is unrealistic hence we avoid most of them by
// using PostTask() in the typical scenario below.
if (!on_next_successful_write_.is_null()) {
- return base::PostTaskAndReplyWithResult(
+ return PostTaskAndReplyWithResult(
task_runner_.get(),
FROM_HERE,
MakeCriticalClosure(task),
@@ -212,7 +221,7 @@ bool ImportantFileWriter::PostWriteTask(const Callback<bool()>& task) {
}
return task_runner_->PostTask(
FROM_HERE,
- MakeCriticalClosure(base::Bind(IgnoreResult(task))));
+ MakeCriticalClosure(Bind(IgnoreResult(task))));
}
void ImportantFileWriter::ForwardSuccessfulWrite(bool result) {
diff --git a/chromium/base/files/important_file_writer.h b/chromium/base/files/important_file_writer.h
index 99f1a7c6814..7c6160a5f9f 100644
--- a/chromium/base/files/important_file_writer.h
+++ b/chromium/base/files/important_file_writer.h
@@ -62,9 +62,13 @@ class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
// |task_runner| is the SequencedTaskRunner instance where on which we will
// execute file I/O operations.
// All non-const methods, ctor and dtor must be called on the same thread.
- ImportantFileWriter(
- const FilePath& path,
- const scoped_refptr<base::SequencedTaskRunner>& task_runner);
+ ImportantFileWriter(const FilePath& path,
+ const scoped_refptr<SequencedTaskRunner>& task_runner);
+
+ // Same as above, but with a custom commit interval.
+ ImportantFileWriter(const FilePath& path,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ TimeDelta interval);
// You have to ensure that there are no pending writes at the moment
// of destruction.
@@ -77,7 +81,7 @@ class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
bool HasPendingWrite() const;
// Save |data| to target filename. Does not block. If there is a pending write
- // scheduled by ScheduleWrite, it is cancelled.
+ // scheduled by ScheduleWrite(), it is cancelled.
void WriteNow(scoped_ptr<std::string> data);
// Schedule a save to target filename. Data will be serialized and saved
@@ -94,16 +98,12 @@ class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
// Registers |on_next_successful_write| to be called once, on the next
// successful write event. Only one callback can be set at once.
void RegisterOnNextSuccessfulWriteCallback(
- const base::Closure& on_next_successful_write);
+ const Closure& on_next_successful_write);
TimeDelta commit_interval() const {
return commit_interval_;
}
- void set_commit_interval(const TimeDelta& interval) {
- commit_interval_ = interval;
- }
-
private:
// Helper method for WriteNow().
bool PostWriteTask(const Callback<bool()>& task);
@@ -113,22 +113,22 @@ class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
void ForwardSuccessfulWrite(bool result);
// Invoked once and then reset on the next successful write event.
- base::Closure on_next_successful_write_;
+ Closure on_next_successful_write_;
// Path being written to.
const FilePath path_;
// TaskRunner for the thread on which file I/O can be done.
- const scoped_refptr<base::SequencedTaskRunner> task_runner_;
+ const scoped_refptr<SequencedTaskRunner> task_runner_;
// Timer used to schedule commit after ScheduleWrite.
- OneShotTimer<ImportantFileWriter> timer_;
+ OneShotTimer timer_;
// Serializer which will provide the data to be saved.
DataSerializer* serializer_;
// Time delta after which scheduled data will be written to disk.
- TimeDelta commit_interval_;
+ const TimeDelta commit_interval_;
WeakPtrFactory<ImportantFileWriter> weak_factory_;
diff --git a/chromium/base/files/important_file_writer_unittest.cc b/chromium/base/files/important_file_writer_unittest.cc
index d376cdc35ab..71900c93d5e 100644
--- a/chromium/base/files/important_file_writer_unittest.cc
+++ b/chromium/base/files/important_file_writer_unittest.cc
@@ -145,8 +145,9 @@ TEST_F(ImportantFileWriterTest, BasicWithSuccessfulWriteObserver) {
}
TEST_F(ImportantFileWriterTest, ScheduleWrite) {
- ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
- writer.set_commit_interval(TimeDelta::FromMilliseconds(25));
+ ImportantFileWriter writer(file_,
+ ThreadTaskRunnerHandle::Get(),
+ TimeDelta::FromMilliseconds(25));
EXPECT_FALSE(writer.HasPendingWrite());
DataSerializer serializer("foo");
writer.ScheduleWrite(&serializer);
@@ -177,8 +178,9 @@ TEST_F(ImportantFileWriterTest, DoScheduledWrite) {
}
TEST_F(ImportantFileWriterTest, BatchingWrites) {
- ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
- writer.set_commit_interval(TimeDelta::FromMilliseconds(25));
+ ImportantFileWriter writer(file_,
+ ThreadTaskRunnerHandle::Get(),
+ TimeDelta::FromMilliseconds(25));
DataSerializer foo("foo"), bar("bar"), baz("baz");
writer.ScheduleWrite(&foo);
writer.ScheduleWrite(&bar);
diff --git a/chromium/base/guid_unittest.cc b/chromium/base/guid_unittest.cc
index 1485497155f..1c5d393c4d8 100644
--- a/chromium/base/guid_unittest.cc
+++ b/chromium/base/guid_unittest.cc
@@ -43,8 +43,8 @@ TEST(GUIDTest, GUIDCorrectlyFormatted) {
for (int it = 0; it < kIterations; ++it) {
std::string guid = GenerateGUID();
EXPECT_TRUE(IsValidGUID(guid));
- EXPECT_TRUE(IsValidGUID(StringToLowerASCII(guid)));
- EXPECT_TRUE(IsValidGUID(StringToUpperASCII(guid)));
+ EXPECT_TRUE(IsValidGUID(ToLowerASCII(guid)));
+ EXPECT_TRUE(IsValidGUID(ToUpperASCII(guid)));
}
}
diff --git a/chromium/base/i18n/break_iterator.cc b/chromium/base/i18n/break_iterator.cc
index e2ed667572f..bc20fff928a 100644
--- a/chromium/base/i18n/break_iterator.cc
+++ b/chromium/base/i18n/break_iterator.cc
@@ -138,10 +138,14 @@ bool BreakIterator::SetText(const base::char16* text, const size_t length) {
}
bool BreakIterator::IsWord() const {
+ return GetWordBreakStatus() == IS_WORD_BREAK;
+}
+
+BreakIterator::WordBreakStatus BreakIterator::GetWordBreakStatus() const {
int32_t status = ubrk_getRuleStatus(static_cast<UBreakIterator*>(iter_));
if (break_type_ != BREAK_WORD && break_type_ != RULE_BASED)
- return false;
- return status != UBRK_WORD_NONE;
+ return IS_LINE_OR_CHAR_BREAK;
+ return status == UBRK_WORD_NONE ? IS_SKIPPABLE_WORD : IS_WORD_BREAK;
}
bool BreakIterator::IsEndOfWord(size_t position) const {
diff --git a/chromium/base/i18n/break_iterator.h b/chromium/base/i18n/break_iterator.h
index 19fdbe01cbd..9dbac7c59e6 100644
--- a/chromium/base/i18n/break_iterator.h
+++ b/chromium/base/i18n/break_iterator.h
@@ -71,6 +71,19 @@ class BASE_I18N_EXPORT BreakIterator {
RULE_BASED,
};
+ enum WordBreakStatus {
+ // The end of text that the iterator recognizes as word characters.
+ // Non-word characters are things like punctuation and spaces.
+ IS_WORD_BREAK,
+ // Characters that the iterator can skip past, such as punctuation,
+ // whitespace, and, if using RULE_BASED mode, characters from another
+ // character set.
+ IS_SKIPPABLE_WORD,
+ // Only used if not in BREAK_WORD or RULE_BASED mode. This is returned for
+ // newlines, line breaks, and character breaks.
+ IS_LINE_OR_CHAR_BREAK
+ };
+
// Requires |str| to live as long as the BreakIterator does.
BreakIterator(const StringPiece16& str, BreakType break_type);
// Make a rule-based iterator. BreakType == RULE_BASED is implied.
@@ -101,6 +114,20 @@ class BASE_I18N_EXPORT BreakIterator {
// this distinction doesn't apply and it always returns false.
bool IsWord() const;
+ // Under BREAK_WORD mode:
+ // - Returns IS_SKIPPABLE_WORD if non-word characters, such as punctuation or
+ // spaces, are found.
+ // - Returns IS_WORD_BREAK if the break we just hit is the end of a sequence
+ // of word characters.
+ // Under RULE_BASED mode:
+ // - Returns IS_SKIPPABLE_WORD if characters outside the rules' character set
+ // or non-word characters, such as punctuation or spaces, are found.
+ // - Returns IS_WORD_BREAK if the break we just hit is the end of a sequence
+ // of word characters that are in the rules' character set.
+ // Not under BREAK_WORD or RULE_BASED mode:
+ // - Returns IS_LINE_OR_CHAR_BREAK.
+ BreakIterator::WordBreakStatus GetWordBreakStatus() const;
+
// Under BREAK_WORD mode, returns true if |position| is at the end of word or
// at the start of word. It always returns false under BREAK_LINE and
// BREAK_NEWLINE modes.
diff --git a/chromium/base/i18n/break_iterator_unittest.cc b/chromium/base/i18n/break_iterator_unittest.cc
index 220a996b961..c53509148d9 100644
--- a/chromium/base/i18n/break_iterator_unittest.cc
+++ b/chromium/base/i18n/break_iterator_unittest.cc
@@ -369,5 +369,90 @@ TEST(BreakIteratorTest, GetStringPiece) {
EXPECT_EQ(StringPiece16(ASCIIToUTF16("string")), iter.GetStringPiece());
}
+// Make sure that when not in RULE_BASED or BREAK_WORD mode we're getting
+// IS_LINE_OR_CHAR_BREAK.
+TEST(BreakIteratorTest, GetWordBreakStatusBreakLine) {
+ // A string containing the English word "foo", followed by two Khmer
+ // characters, the English word "Can", and then two Russian characters and
+ // punctuation.
+ base::string16 text(
+ base::WideToUTF16(L"foo \x1791\x17C1 \nCan \x041C\x0438..."));
+ BreakIterator iter(text, BreakIterator::BREAK_LINE);
+ ASSERT_TRUE(iter.Init());
+
+ EXPECT_TRUE(iter.Advance());
+ // Finds "foo" and the space.
+ EXPECT_EQ(base::UTF8ToUTF16("foo "), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds the Khmer characters, the next space, and the newline.
+ EXPECT_EQ(base::WideToUTF16(L"\x1791\x17C1 \n"), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds "Can" and the space.
+ EXPECT_EQ(base::UTF8ToUTF16("Can "), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds the Russian characters and periods.
+ EXPECT_EQ(base::WideToUTF16(L"\x041C\x0438..."), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_LINE_OR_CHAR_BREAK);
+ EXPECT_FALSE(iter.Advance());
+}
+
+// Make sure that in BREAK_WORD mode we're getting IS_WORD_BREAK and
+// IS_SKIPPABLE_WORD when we should be. IS_WORD_BREAK should be returned when we
+// finish going over non-punctuation characters while IS_SKIPPABLE_WORD should
+// be returned on punctuation and spaces.
+TEST(BreakIteratorTest, GetWordBreakStatusBreakWord) {
+ // A string containing the English word "foo", followed by two Khmer
+ // characters, the English word "Can", and then two Russian characters and
+ // punctuation.
+ base::string16 text(
+ base::WideToUTF16(L"foo \x1791\x17C1 \nCan \x041C\x0438..."));
+ BreakIterator iter(text, BreakIterator::BREAK_WORD);
+ ASSERT_TRUE(iter.Init());
+
+ EXPECT_TRUE(iter.Advance());
+ // Finds "foo".
+ EXPECT_EQ(base::UTF8ToUTF16("foo"), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds the space, and the Khmer characters.
+ EXPECT_EQ(base::UTF8ToUTF16(" "), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_EQ(base::WideToUTF16(L"\x1791\x17C1"), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds the space and the newline.
+ EXPECT_EQ(base::UTF8ToUTF16(" "), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_EQ(base::UTF8ToUTF16("\n"), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_TRUE(iter.Advance());
+ // Finds "Can".
+ EXPECT_EQ(base::UTF8ToUTF16("Can"), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds the space and the Russian characters.
+ EXPECT_EQ(base::UTF8ToUTF16(" "), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_EQ(base::WideToUTF16(L"\x041C\x0438"), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_WORD_BREAK);
+ EXPECT_TRUE(iter.Advance());
+ // Finds the trailing periods.
+ EXPECT_EQ(base::UTF8ToUTF16("."), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_EQ(base::UTF8ToUTF16("."), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_TRUE(iter.Advance());
+ EXPECT_EQ(base::UTF8ToUTF16("."), iter.GetString());
+ EXPECT_EQ(iter.GetWordBreakStatus(), BreakIterator::IS_SKIPPABLE_WORD);
+ EXPECT_FALSE(iter.Advance());
+}
+
} // namespace i18n
} // namespace base
diff --git a/chromium/base/i18n/case_conversion.h b/chromium/base/i18n/case_conversion.h
index de1f43275c6..0631a800b71 100644
--- a/chromium/base/i18n/case_conversion.h
+++ b/chromium/base/i18n/case_conversion.h
@@ -26,6 +26,10 @@ namespace i18n {
// locale. Use this when comparing general Unicode strings that don't
// necessarily belong in the user's current locale (like commands, protocol
// names, other strings from the web) for case-insensitive equality.
+//
+// Note that case conversions will change the length of the string in some
+// not-uncommon cases. Never assume that the output is the same length as
+// the input.
// Returns the lower case equivalent of string. Uses ICU's current locale.
BASE_I18N_EXPORT string16 ToLower(StringPiece16 string);
diff --git a/chromium/base/i18n/case_conversion_unittest.cc b/chromium/base/i18n/case_conversion_unittest.cc
index dc5bc1fe8b1..ee795bc6e33 100644
--- a/chromium/base/i18n/case_conversion_unittest.cc
+++ b/chromium/base/i18n/case_conversion_unittest.cc
@@ -5,6 +5,7 @@
#include "base/i18n/case_conversion.h"
#include "base/i18n/rtl.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/icu/source/i18n/unicode/usearch.h"
@@ -58,7 +59,7 @@ TEST(CaseConversionTest, TurkishLocaleConversion) {
const string16 expected_lower(WideToUTF16(L"\x69\x131"));
const string16 expected_upper(WideToUTF16(L"\x49\x49"));
- std::string default_locale(uloc_getDefault());
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
string16 result = ToLower(mixed);
@@ -77,8 +78,6 @@ TEST(CaseConversionTest, TurkishLocaleConversion) {
result = ToUpper(mixed);
EXPECT_EQ(expected_upper_turkish, result);
-
- SetICUDefaultLocale(default_locale.data());
}
TEST(CaseConversionTest, FoldCase) {
@@ -97,7 +96,7 @@ TEST(CaseConversionTest, FoldCase) {
const string16 turkish(WideToUTF16(L"\x49\x131"));
const string16 turkish_expected(WideToUTF16(L"\x69\x131"));
- std::string default_locale(uloc_getDefault());
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
EXPECT_EQ(turkish_expected, FoldCase(turkish));
diff --git a/chromium/base/i18n/message_formatter.cc b/chromium/base/i18n/message_formatter.cc
new file mode 100644
index 00000000000..702e51b94aa
--- /dev/null
+++ b/chromium/base/i18n/message_formatter.cc
@@ -0,0 +1,141 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/message_formatter.h"
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/time/time.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/common/unicode/utypes.h"
+#include "third_party/icu/source/i18n/unicode/fmtable.h"
+#include "third_party/icu/source/i18n/unicode/msgfmt.h"
+
+using icu::UnicodeString;
+
+namespace base {
+namespace i18n {
+namespace {
+UnicodeString UnicodeStringFromStringPiece(StringPiece str) {
+ return UnicodeString::fromUTF8(
+ icu::StringPiece(str.data(), base::checked_cast<int32_t>(str.size())));
+}
+} // anonymous namespace
+
+namespace internal {
+MessageArg::MessageArg() : formattable(nullptr) {}
+
+MessageArg::MessageArg(const char* s)
+ : formattable(new icu::Formattable(UnicodeStringFromStringPiece(s))) {}
+
+MessageArg::MessageArg(StringPiece s)
+ : formattable(new icu::Formattable(UnicodeStringFromStringPiece(s))) {}
+
+MessageArg::MessageArg(const std::string& s)
+ : formattable(new icu::Formattable(UnicodeString::fromUTF8(s))) {}
+
+MessageArg::MessageArg(const string16& s)
+ : formattable(new icu::Formattable(UnicodeString(s.data(), s.size()))) {}
+
+MessageArg::MessageArg(int i) : formattable(new icu::Formattable(i)) {}
+
+MessageArg::MessageArg(int64_t i) : formattable(new icu::Formattable(i)) {}
+
+MessageArg::MessageArg(double d) : formattable(new icu::Formattable(d)) {}
+
+MessageArg::MessageArg(const Time& t)
+ : formattable(new icu::Formattable(static_cast<UDate>(t.ToJsTime()))) {}
+
+MessageArg::~MessageArg() {}
+
+// Tests if this argument has a value, and if so increments *count.
+bool MessageArg::has_value(int *count) const {
+ if (formattable == nullptr)
+ return false;
+
+ ++*count;
+ return true;
+}
+
+} // namespace internal
+
+string16 MessageFormatter::FormatWithNumberedArgs(
+ StringPiece16 msg,
+ const internal::MessageArg& arg0,
+ const internal::MessageArg& arg1,
+ const internal::MessageArg& arg2,
+ const internal::MessageArg& arg3,
+ const internal::MessageArg& arg4,
+ const internal::MessageArg& arg5,
+ const internal::MessageArg& arg6) {
+ int32_t args_count = 0;
+ icu::Formattable args[] = {
+ arg0.has_value(&args_count) ? *arg0.formattable : icu::Formattable(),
+ arg1.has_value(&args_count) ? *arg1.formattable : icu::Formattable(),
+ arg2.has_value(&args_count) ? *arg2.formattable : icu::Formattable(),
+ arg3.has_value(&args_count) ? *arg3.formattable : icu::Formattable(),
+ arg4.has_value(&args_count) ? *arg4.formattable : icu::Formattable(),
+ arg5.has_value(&args_count) ? *arg5.formattable : icu::Formattable(),
+ arg6.has_value(&args_count) ? *arg6.formattable : icu::Formattable(),
+ };
+
+ UnicodeString msg_string(msg.data(), msg.size());
+ UErrorCode error = U_ZERO_ERROR;
+ icu::MessageFormat format(msg_string, error);
+ icu::UnicodeString formatted;
+ icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
+ format.format(args, args_count, formatted, ignore, error);
+ if (U_FAILURE(error)) {
+ LOG(ERROR) << "MessageFormat(" << msg.as_string() << ") failed with "
+ << u_errorName(error);
+ return string16();
+ }
+ return string16(formatted.getBuffer(), formatted.length());
+}
+
+string16 MessageFormatter::FormatWithNamedArgs(
+ StringPiece16 msg,
+ StringPiece name0, const internal::MessageArg& arg0,
+ StringPiece name1, const internal::MessageArg& arg1,
+ StringPiece name2, const internal::MessageArg& arg2,
+ StringPiece name3, const internal::MessageArg& arg3,
+ StringPiece name4, const internal::MessageArg& arg4,
+ StringPiece name5, const internal::MessageArg& arg5,
+ StringPiece name6, const internal::MessageArg& arg6) {
+ icu::UnicodeString names[] = {
+ UnicodeStringFromStringPiece(name0),
+ UnicodeStringFromStringPiece(name1),
+ UnicodeStringFromStringPiece(name2),
+ UnicodeStringFromStringPiece(name3),
+ UnicodeStringFromStringPiece(name4),
+ UnicodeStringFromStringPiece(name5),
+ UnicodeStringFromStringPiece(name6),
+ };
+ int32_t args_count = 0;
+ icu::Formattable args[] = {
+ arg0.has_value(&args_count) ? *arg0.formattable : icu::Formattable(),
+ arg1.has_value(&args_count) ? *arg1.formattable : icu::Formattable(),
+ arg2.has_value(&args_count) ? *arg2.formattable : icu::Formattable(),
+ arg3.has_value(&args_count) ? *arg3.formattable : icu::Formattable(),
+ arg4.has_value(&args_count) ? *arg4.formattable : icu::Formattable(),
+ arg5.has_value(&args_count) ? *arg5.formattable : icu::Formattable(),
+ arg6.has_value(&args_count) ? *arg6.formattable : icu::Formattable(),
+ };
+
+ UnicodeString msg_string(msg.data(), msg.size());
+ UErrorCode error = U_ZERO_ERROR;
+ icu::MessageFormat format(msg_string, error);
+
+ icu::UnicodeString formatted;
+ format.format(names, args, args_count, formatted, error);
+ if (U_FAILURE(error)) {
+ LOG(ERROR) << "MessageFormat(" << msg.as_string() << ") failed with "
+ << u_errorName(error);
+ return string16();
+ }
+ return string16(formatted.getBuffer(), formatted.length());
+}
+
+} // namespace i18n
+} // namespace base
diff --git a/chromium/base/i18n/message_formatter.h b/chromium/base/i18n/message_formatter.h
new file mode 100644
index 00000000000..bcdc3bc9775
--- /dev/null
+++ b/chromium/base/i18n/message_formatter.h
@@ -0,0 +1,111 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_MESSAGE_FORMATTER_H_
+#define BASE_I18N_MESSAGE_FORMATTER_H_
+
+#include <stdint.h>
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "third_party/icu/source/common/unicode/uversion.h"
+
+U_NAMESPACE_BEGIN
+class Formattable;
+U_NAMESPACE_END
+
+namespace base {
+
+class Time;
+
+namespace i18n {
+
+class MessageFormatter;
+
+namespace internal {
+
+class BASE_I18N_EXPORT MessageArg {
+ public:
+ MessageArg(const char* s);
+ MessageArg(StringPiece s);
+ MessageArg(const std::string& s);
+ MessageArg(const string16& s);
+ MessageArg(int i);
+ MessageArg(int64_t i);
+ MessageArg(double d);
+ MessageArg(const Time& t);
+ ~MessageArg();
+
+ private:
+ friend class base::i18n::MessageFormatter;
+ MessageArg();
+ // Tests if this argument has a value, and if so increments *count.
+ bool has_value(int* count) const;
+ scoped_ptr<icu::Formattable> formattable;
+ DISALLOW_COPY_AND_ASSIGN(MessageArg);
+};
+
+} // namespace internal
+
+// Message Formatter with the ICU message format syntax support.
+// It can format strings (UTF-8 and UTF-16), numbers and base::Time with
+// plural, gender and other 'selectors' support. This is handy if you
+// have multiple parameters of different types and some of them require
+// plural or gender/selector support.
+//
+// To use this API for locale-sensitive formatting, retrieve a 'message
+// template' in the ICU message format from a message bundle (e.g. with
+// l10n_util::GetStringUTF16()) and pass it to FormatWith{Named,Numbered}Args.
+//
+// MessageFormat specs:
+// http://icu-project.org/apiref/icu4j/com/ibm/icu/text/MessageFormat.html
+// http://icu-project.org/apiref/icu4c/classicu_1_1DecimalFormat.html#details
+// Examples:
+// http://userguide.icu-project.org/formatparse/messages
+// message_formatter_unittest.cc
+// go/plurals inside Google.
+// TODO(jshin): Document this API at sites.chromium.org and add a reference
+// here.
+
+class BASE_I18N_EXPORT MessageFormatter {
+ public:
+ static string16 FormatWithNamedArgs(
+ StringPiece16 msg,
+ StringPiece name0 = StringPiece(),
+ const internal::MessageArg& arg0 = internal::MessageArg(),
+ StringPiece name1 = StringPiece(),
+ const internal::MessageArg& arg1 = internal::MessageArg(),
+ StringPiece name2 = StringPiece(),
+ const internal::MessageArg& arg2 = internal::MessageArg(),
+ StringPiece name3 = StringPiece(),
+ const internal::MessageArg& arg3 = internal::MessageArg(),
+ StringPiece name4 = StringPiece(),
+ const internal::MessageArg& arg4 = internal::MessageArg(),
+ StringPiece name5 = StringPiece(),
+ const internal::MessageArg& arg5 = internal::MessageArg(),
+ StringPiece name6 = StringPiece(),
+ const internal::MessageArg& arg6 = internal::MessageArg());
+
+ static string16 FormatWithNumberedArgs(
+ StringPiece16 msg,
+ const internal::MessageArg& arg0 = internal::MessageArg(),
+ const internal::MessageArg& arg1 = internal::MessageArg(),
+ const internal::MessageArg& arg2 = internal::MessageArg(),
+ const internal::MessageArg& arg3 = internal::MessageArg(),
+ const internal::MessageArg& arg4 = internal::MessageArg(),
+ const internal::MessageArg& arg5 = internal::MessageArg(),
+ const internal::MessageArg& arg6 = internal::MessageArg());
+
+ private:
+ MessageFormatter() {}
+ DISALLOW_COPY_AND_ASSIGN(MessageFormatter);
+};
+
+} // namespace i18n
+} // namespace base
+
+#endif // BASE_I18N_MESSAGE_FORMATTER_H_
diff --git a/chromium/base/i18n/message_formatter_unittest.cc b/chromium/base/i18n/message_formatter_unittest.cc
new file mode 100644
index 00000000000..85e2e171cb1
--- /dev/null
+++ b/chromium/base/i18n/message_formatter_unittest.cc
@@ -0,0 +1,180 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/message_formatter.h"
+
+#include "base/i18n/rtl.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
+#include "third_party/icu/source/i18n/unicode/datefmt.h"
+#include "third_party/icu/source/i18n/unicode/msgfmt.h"
+
+typedef testing::Test MessageFormatterTest;
+
+namespace base {
+namespace i18n {
+
+class MessageFormatterTest : public testing::Test {
+ protected:
+ MessageFormatterTest() {
+ original_locale_ = GetConfiguredLocale();
+ SetICUDefaultLocale("en-US");
+ }
+ ~MessageFormatterTest() override {
+ SetICUDefaultLocale(original_locale_);
+ }
+
+ private:
+ std::string original_locale_;
+};
+
+namespace {
+
+void AppendFormattedDateTime(const scoped_ptr<icu::DateFormat>& df,
+ const Time& now, std::string* result) {
+ icu::UnicodeString formatted;
+ df->format(static_cast<UDate>(now.ToJsTime()), formatted).
+ toUTF8String(*result);
+}
+
+} // namespace
+
+TEST_F(MessageFormatterTest, PluralNamedArgs) {
+ const string16 pattern = ASCIIToUTF16(
+ "{num_people, plural, "
+ "=0 {I met nobody in {place}.}"
+ "=1 {I met a person in {place}.}"
+ "other {I met # people in {place}.}}");
+
+ std::string result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 0, "place", "Paris"));
+ EXPECT_EQ("I met nobody in Paris.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 1, "place", "Paris"));
+ EXPECT_EQ("I met a person in Paris.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 5, "place", "Paris"));
+ EXPECT_EQ("I met 5 people in Paris.", result);
+}
+
+TEST_F(MessageFormatterTest, PluralNamedArgsWithOffset) {
+ const string16 pattern = ASCIIToUTF16(
+ "{num_people, plural, offset:1 "
+ "=0 {I met nobody in {place}.}"
+ "=1 {I met {person} in {place}.}"
+ "=2 {I met {person} and one other person in {place}.}"
+ "=13 {I met {person} and a dozen other people in {place}.}"
+ "other {I met {person} and # other people in {place}.}}");
+
+ std::string result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 0, "place", "Paris"));
+ EXPECT_EQ("I met nobody in Paris.", result);
+ // {person} is ignored if {num_people} is 0.
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 0, "place", "Paris", "person", "Peter"));
+ EXPECT_EQ("I met nobody in Paris.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 1, "place", "Paris", "person", "Peter"));
+ EXPECT_EQ("I met Peter in Paris.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 2, "place", "Paris", "person", "Peter"));
+ EXPECT_EQ("I met Peter and one other person in Paris.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 13, "place", "Paris", "person", "Peter"));
+ EXPECT_EQ("I met Peter and a dozen other people in Paris.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNamedArgs(
+ pattern, "num_people", 50, "place", "Paris", "person", "Peter"));
+ EXPECT_EQ("I met Peter and 49 other people in Paris.", result);
+}
+
+TEST_F(MessageFormatterTest, PluralNumberedArgs) {
+ const string16 pattern = ASCIIToUTF16(
+ "{1, plural, "
+ "=1 {The cert for {0} expired yesterday.}"
+ "=7 {The cert for {0} expired a week ago.}"
+ "other {The cert for {0} expired # days ago.}}");
+
+ std::string result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "example.com", 1));
+ EXPECT_EQ("The cert for example.com expired yesterday.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "example.com", 7));
+ EXPECT_EQ("The cert for example.com expired a week ago.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "example.com", 15));
+ EXPECT_EQ("The cert for example.com expired 15 days ago.", result);
+}
+
+TEST_F(MessageFormatterTest, PluralNumberedArgsWithDate) {
+ const string16 pattern = ASCIIToUTF16(
+ "{1, plural, "
+ "=1 {The cert for {0} expired yesterday. Today is {2,date,full}}"
+ "other {The cert for {0} expired # days ago. Today is {2,date,full}}}");
+
+ base::Time now = base::Time::Now();
+ using icu::DateFormat;
+ scoped_ptr<DateFormat> df(DateFormat::createDateInstance(DateFormat::FULL));
+ std::string second_sentence = " Today is ";
+ AppendFormattedDateTime(df, now, &second_sentence);
+
+ std::string result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "example.com", 1, now));
+ EXPECT_EQ("The cert for example.com expired yesterday." + second_sentence,
+ result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "example.com", 15, now));
+ EXPECT_EQ("The cert for example.com expired 15 days ago." + second_sentence,
+ result);
+}
+
+TEST_F(MessageFormatterTest, DateTimeAndNumber) {
+ // Note that using 'mph' for all locales is not a good i18n practice.
+ const string16 pattern = ASCIIToUTF16(
+ "At {0,time, short} on {0,date, medium}, "
+ "there was {1} at building {2,number,integer}. "
+ "The speed of the wind was {3,number,###.#} mph.");
+
+ using icu::DateFormat;
+ scoped_ptr<DateFormat> tf(DateFormat::createTimeInstance(DateFormat::SHORT));
+ scoped_ptr<DateFormat> df(DateFormat::createDateInstance(DateFormat::MEDIUM));
+
+ base::Time now = base::Time::Now();
+ std::string expected = "At ";
+ AppendFormattedDateTime(tf, now, &expected);
+ expected.append(" on ");
+ AppendFormattedDateTime(df, now, &expected);
+ expected.append(", there was an explosion at building 3. "
+ "The speed of the wind was 37.4 mph.");
+
+ EXPECT_EQ(expected, UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, now, "an explosion", 3, 37.413)));
+}
+
+TEST_F(MessageFormatterTest, SelectorSingleOrMultiple) {
+ const string16 pattern = ASCIIToUTF16(
+ "{0, select,"
+ "single {Select a file to upload.}"
+ "multiple {Select files to upload.}"
+ "other {UNUSED}}");
+
+ std::string result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "single"));
+ EXPECT_EQ("Select a file to upload.", result);
+ result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "multiple"));
+ EXPECT_EQ("Select files to upload.", result);
+
+  // Fallback is used when the parameter matches none of the selectors
+  // specified in the message pattern.
+ result = UTF16ToASCII(MessageFormatter::FormatWithNumberedArgs(
+ pattern, "foobar"));
+ EXPECT_EQ("UNUSED", result);
+}
+
+} // namespace i18n
+} // namespace base
diff --git a/chromium/base/i18n/number_formatting_unittest.cc b/chromium/base/i18n/number_formatting_unittest.cc
index 3b0718d454f..dc6de2bbb6d 100644
--- a/chromium/base/i18n/number_formatting_unittest.cc
+++ b/chromium/base/i18n/number_formatting_unittest.cc
@@ -7,7 +7,9 @@
#include "base/i18n/number_formatting.h"
#include "base/i18n/rtl.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/icu/source/i18n/unicode/usearch.h"
namespace base {
namespace {
@@ -27,6 +29,8 @@ TEST(NumberFormattingTest, FormatNumber) {
{-42, "-42", "-42"},
};
+ test::ScopedRestoreICUDefaultLocale restore_locale;
+
for (size_t i = 0; i < arraysize(cases); ++i) {
i18n::SetICUDefaultLocale("en");
testing::ResetFormatters();
@@ -72,6 +76,7 @@ TEST(NumberFormattingTest, FormatDouble) {
{-42.7, 3, "-42.700", "-42,700"},
};
+ test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < arraysize(cases); ++i) {
i18n::SetICUDefaultLocale("en");
testing::ResetFormatters();
diff --git a/chromium/base/i18n/rtl.cc b/chromium/base/i18n/rtl.cc
index 1cccae28937..ac9589cb325 100644
--- a/chromium/base/i18n/rtl.cc
+++ b/chromium/base/i18n/rtl.cc
@@ -4,8 +4,11 @@
#include "base/i18n/rtl.h"
+#include <algorithm>
+
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "base/strings/utf_string_conversions.h"
@@ -14,6 +17,10 @@
#include "third_party/icu/source/common/unicode/uscript.h"
#include "third_party/icu/source/i18n/unicode/coll.h"
+#if defined(OS_IOS)
+#include "base/ios/ios_util.h"
+#endif
+
namespace {
// Extract language, country and variant, but ignore keywords. For example,
@@ -31,11 +38,8 @@ std::string GetLocaleString(const icu::Locale& locale) {
result += country;
}
- if (variant != NULL && *variant != '\0') {
- std::string variant_str(variant);
- base::StringToLowerASCII(&variant_str);
- result += '@' + variant_str;
- }
+ if (variant != NULL && *variant != '\0')
+ result += '@' + base::ToLowerASCII(variant);
return result;
}
@@ -125,12 +129,38 @@ bool IsRTL() {
bool ICUIsRTL() {
if (g_icu_text_direction == UNKNOWN_DIRECTION) {
const icu::Locale& locale = icu::Locale::getDefault();
- g_icu_text_direction = GetTextDirectionForLocale(locale.getName());
+ g_icu_text_direction = GetTextDirectionForLocaleInStartUp(locale.getName());
}
return g_icu_text_direction == RIGHT_TO_LEFT;
}
+TextDirection GetTextDirectionForLocaleInStartUp(const char* locale_name) {
+// On iOS, check for RTL forcing.
+#if defined(OS_IOS)
+ if (ios::IsInForcedRTL())
+ return RIGHT_TO_LEFT;
+#endif
+
+ // This list needs to be updated in alphabetical order if we add more RTL
+ // locales.
+ static const char* kRTLLanguageCodes[] = {"ar", "fa", "he", "iw", "ur"};
+ std::vector<StringPiece> locale_split =
+ SplitStringPiece(locale_name, "-_", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+ const StringPiece& language_code = locale_split[0];
+ if (std::binary_search(kRTLLanguageCodes,
+ kRTLLanguageCodes + arraysize(kRTLLanguageCodes),
+ language_code))
+ return RIGHT_TO_LEFT;
+ return LEFT_TO_RIGHT;
+}
+
TextDirection GetTextDirectionForLocale(const char* locale_name) {
+ // On iOS, check for RTL forcing.
+#if defined(OS_IOS)
+ if (ios::IsInForcedRTL())
+ return RIGHT_TO_LEFT;
+#endif
+
UErrorCode status = U_ZERO_ERROR;
ULayoutType layout_dir = uloc_getCharacterOrientation(locale_name, &status);
DCHECK(U_SUCCESS(status));
diff --git a/chromium/base/i18n/rtl.h b/chromium/base/i18n/rtl.h
index 9b9a0dcd31c..bba93ce8f56 100644
--- a/chromium/base/i18n/rtl.h
+++ b/chromium/base/i18n/rtl.h
@@ -61,6 +61,12 @@ BASE_I18N_EXPORT bool IsRTL();
BASE_I18N_EXPORT bool ICUIsRTL();
// Returns the text direction for |locale_name|.
+// As a startup optimization, this method checks the locale against a list of
+// Chrome-supported RTL locales.
+BASE_I18N_EXPORT TextDirection
+GetTextDirectionForLocaleInStartUp(const char* locale_name);
+
+// Returns the text direction for |locale_name|.
BASE_I18N_EXPORT TextDirection GetTextDirectionForLocale(
const char* locale_name);
diff --git a/chromium/base/i18n/rtl_unittest.cc b/chromium/base/i18n/rtl_unittest.cc
index 87ac87d242a..6deaf34582a 100644
--- a/chromium/base/i18n/rtl_unittest.cc
+++ b/chromium/base/i18n/rtl_unittest.cc
@@ -10,6 +10,7 @@
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
#include "third_party/icu/source/i18n/unicode/usearch.h"
@@ -305,6 +306,7 @@ TEST_F(RTLTest, WrapString) {
const bool was_rtl = IsRTL();
+ test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
SetRTL(!IsRTL());
@@ -352,6 +354,7 @@ TEST_F(RTLTest, GetDisplayStringInLTRDirectionality) {
const bool was_rtl = IsRTL();
+ test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
SetRTL(!IsRTL());
@@ -399,6 +402,28 @@ TEST_F(RTLTest, GetTextDirection) {
EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocale("ja"));
}
+TEST_F(RTLTest, GetTextDirectionForLocaleInStartUp) {
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("ar"));
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("ar_EG"));
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("he"));
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("he_IL"));
+ // iw is an obsolete code for Hebrew.
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("iw"));
+ // Although we're not yet localized to Farsi and Urdu, we
+ // do have the text layout direction information for them.
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("fa"));
+ EXPECT_EQ(RIGHT_TO_LEFT, GetTextDirectionForLocaleInStartUp("ur"));
+ EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("en"));
+ // Chinese in China with '-'.
+ EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("zh-CN"));
+ // Filipino : 3-letter code
+ EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("fil"));
+ // Russian
+ EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("ru"));
+ // Japanese that uses multiple scripts
+ EXPECT_EQ(LEFT_TO_RIGHT, GetTextDirectionForLocaleInStartUp("ja"));
+}
+
TEST_F(RTLTest, UnadjustStringForLocaleDirection) {
// These test strings are borrowed from WrapPathWithLTRFormatting
const wchar_t* cases[] = {
@@ -416,6 +441,7 @@ TEST_F(RTLTest, UnadjustStringForLocaleDirection) {
const bool was_rtl = IsRTL();
+ test::ScopedRestoreICUDefaultLocale restore_locale;
for (size_t i = 0; i < 2; ++i) {
// Toggle the application default text direction (to try each direction).
SetRTL(!IsRTL());
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index 9f152c0b2d9..e64acf121de 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -7,6 +7,7 @@
#include "base/i18n/rtl.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/test/icu_test_util.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/icu/source/common/unicode/uversion.h"
@@ -41,6 +42,7 @@ base::string16 GetShortTimeZone(const Time& time) {
TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault12h) {
// Test for a locale defaulted to 12h clock.
// As an instance, we use third_party/icu/source/data/locales/en.txt.
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
Time time(Time::FromLocalExploded(kTestDateTimeExploded));
@@ -76,6 +78,7 @@ TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault12h) {
TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault24h) {
// Test for a locale defaulted to 24h clock.
// As an instance, we use third_party/icu/source/data/locales/en_GB.txt.
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_GB");
Time time(Time::FromLocalExploded(kTestDateTimeExploded));
@@ -111,6 +114,7 @@ TEST(TimeFormattingTest, TimeFormatTimeOfDayDefault24h) {
TEST(TimeFormattingTest, TimeFormatTimeOfDayJP) {
// Test for a locale that uses different mark than "AM" and "PM".
// As an instance, we use third_party/icu/source/data/locales/ja.txt.
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("ja_JP");
Time time(Time::FromLocalExploded(kTestDateTimeExploded));
@@ -144,6 +148,7 @@ TEST(TimeFormattingTest, TimeFormatTimeOfDayJP) {
TEST(TimeFormattingTest, TimeFormatDateUS) {
// See third_party/icu/source/data/locales/en.txt.
// The date patterns are "EEEE, MMMM d, y", "MMM d, y", and "M/d/yy".
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_US");
Time time(Time::FromLocalExploded(kTestDateTimeExploded));
@@ -166,6 +171,7 @@ TEST(TimeFormattingTest, TimeFormatDateUS) {
TEST(TimeFormattingTest, TimeFormatDateGB) {
// See third_party/icu/source/data/locales/en_GB.txt.
// The date patterns are "EEEE, d MMMM y", "d MMM y", and "dd/MM/yyyy".
+ test::ScopedRestoreICUDefaultLocale restore_locale;
i18n::SetICUDefaultLocale("en_GB");
Time time(Time::FromLocalExploded(kTestDateTimeExploded));
diff --git a/chromium/base/id_map.h b/chromium/base/id_map.h
index 852c1380471..b1abdfdcee4 100644
--- a/chromium/base/id_map.h
+++ b/chromium/base/id_map.h
@@ -5,9 +5,9 @@
#ifndef BASE_ID_MAP_H_
#define BASE_ID_MAP_H_
+#include <stdint.h>
#include <set>
-#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
#include "base/logging.h"
#include "base/threading/non_thread_safe.h"
@@ -30,10 +30,12 @@ enum IDMapOwnershipSemantics {
//
// This class does not have a virtual destructor, do not inherit from it when
// ownership semantics are set to own because pointers will leak.
-template<typename T, IDMapOwnershipSemantics OS = IDMapExternalPointer>
+template <typename T,
+ IDMapOwnershipSemantics OS = IDMapExternalPointer,
+ typename K = int32_t>
class IDMap : public base::NonThreadSafe {
public:
- typedef int32 KeyType;
+ using KeyType = K;
private:
typedef base::hash_map<KeyType, T*> HashTable;
@@ -249,10 +251,8 @@ class IDMap : public base::NonThreadSafe {
void Compact() {
DCHECK_EQ(0, iteration_depth_);
- for (std::set<KeyType>::const_iterator i = removed_ids_.begin();
- i != removed_ids_.end(); ++i) {
- Remove(*i);
- }
+ for (const auto& i : removed_ids_)
+ Remove(i);
removed_ids_.clear();
}
diff --git a/chromium/base/id_map_unittest.cc b/chromium/base/id_map_unittest.cc
index a9fb2b9decd..2cd63093805 100644
--- a/chromium/base/id_map_unittest.cc
+++ b/chromium/base/id_map_unittest.cc
@@ -355,4 +355,16 @@ TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
EXPECT_EQ(owned_del_count, kCount);
}
+TEST(IDMapTest, Int64KeyType) {
+ IDMap<TestObject, IDMapExternalPointer, int64_t> map;
+ TestObject obj1;
+ const int64_t kId1 = 999999999999999999;
+
+ map.AddWithID(&obj1, kId1);
+ EXPECT_EQ(&obj1, map.Lookup(kId1));
+
+ map.Remove(kId1);
+ EXPECT_TRUE(map.IsEmpty());
+}
+
} // namespace
diff --git a/chromium/base/ios/ios_util.h b/chromium/base/ios/ios_util.h
index 688fbf3ba9f..9f65339ef0d 100644
--- a/chromium/base/ios/ios_util.h
+++ b/chromium/base/ios/ios_util.h
@@ -20,6 +20,13 @@ BASE_EXPORT bool IsRunningOnIOS9OrLater();
// Returns whether the operating system is at the given version or later.
BASE_EXPORT bool IsRunningOnOrLater(int32 major, int32 minor, int32 bug_fix);
+// Returns whether iOS is signalling that an RTL text direction should be used
+// regardless of the current locale. This should not return true if the current
+// language is a "real" RTL language such as Arabic or Urdu; it should only
+// return true in cases where the RTL text direction has been forced (for
+// example by using the "RTL Pseudolanguage" option when launching from Xcode).
+BASE_EXPORT bool IsInForcedRTL();
+
} // namespace ios
} // namespace base
diff --git a/chromium/base/ios/ios_util.mm b/chromium/base/ios/ios_util.mm
index d9200450950..554a2029c80 100644
--- a/chromium/base/ios/ios_util.mm
+++ b/chromium/base/ios/ios_util.mm
@@ -4,6 +4,8 @@
#include "base/ios/ios_util.h"
+#import <Foundation/Foundation.h>
+
#include "base/sys_info.h"
namespace {
@@ -39,5 +41,10 @@ bool IsRunningOnOrLater(int32 major, int32 minor, int32 bug_fix) {
return true;
}
+bool IsInForcedRTL() {
+ NSUserDefaults* defaults = [NSUserDefaults standardUserDefaults];
+ return [defaults boolForKey:@"NSForceRightToLeftWritingDirection"];
+}
+
} // namespace ios
} // namespace base
diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h
index b4d0b1bf977..4e23beb9289 100644
--- a/chromium/base/json/json_parser.h
+++ b/chromium/base/json/json_parser.h
@@ -10,31 +10,14 @@
#include "base/base_export.h"
#include "base/basictypes.h"
#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
#include "base/json/json_reader.h"
#include "base/strings/string_piece.h"
-#if !defined(OS_CHROMEOS)
-#include "base/gtest_prod_util.h"
-#endif
-
namespace base {
-class Value;
-}
-#if defined(OS_CHROMEOS)
-// Chromium and Chromium OS check out gtest to different places, so this is
-// unable to compile on both if gtest_prod.h is included here. Instead, include
-// its only contents -- this will need to be updated if the macro ever changes.
-#define FRIEND_TEST(test_case_name, test_name)\
-friend class test_case_name##_##test_name##_Test
-
-#define FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
- FRIEND_TEST(test_case_name, test_name); \
- FRIEND_TEST(test_case_name, DISABLED_##test_name); \
- FRIEND_TEST(test_case_name, FLAKY_##test_name)
-#endif // OS_CHROMEOS
+class Value;
-namespace base {
namespace internal {
class JSONParserTest;
diff --git a/chromium/base/json/string_escape.cc b/chromium/base/json/string_escape.cc
index 469f9f98c03..f5d6a760218 100644
--- a/chromium/base/json/string_escape.cc
+++ b/chromium/base/json/string_escape.cc
@@ -59,6 +59,14 @@ bool EscapeSpecialCodePoint(uint32 code_point, std::string* dest) {
case '<':
dest->append("\\u003C");
break;
+ // Escape the "Line Separator" and "Paragraph Separator" characters, since
+ // they should be treated like a new line \r or \n.
+ case 0x2028:
+ dest->append("\\u2028");
+ break;
+ case 0x2029:
+ dest->append("\\u2029");
+ break;
default:
return false;
}
diff --git a/chromium/base/json/string_escape_unittest.cc b/chromium/base/json/string_escape_unittest.cc
index 100373fccd3..f7ccafc9cb5 100644
--- a/chromium/base/json/string_escape_unittest.cc
+++ b/chromium/base/json/string_escape_unittest.cc
@@ -21,6 +21,8 @@ TEST(JSONStringEscapeTest, EscapeUTF8) {
{"b\x0f\x7f\xf0\xff!", // \xf0\xff is not a valid UTF-8 unit.
"b\\u000F\x7F\xEF\xBF\xBD\xEF\xBF\xBD!"},
{"c<>d", "c\\u003C>d"},
+ {"Hello\xe2\x80\xa8world", "Hello\\u2028world"},
+ {"\xe2\x80\xa9purple", "\\u2029purple"},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
@@ -79,6 +81,8 @@ TEST(JSONStringEscapeTest, EscapeUTF16) {
"a\\b\\f\\n\\r\\t\\u000B\\u0001\\\\.\\\"z"},
{L"b\x0f\x7f\xf0\xff!", "b\\u000F\x7F\xC3\xB0\xC3\xBF!"},
{L"c<>d", "c\\u003C>d"},
+ {L"Hello\u2028world", "Hello\\u2028world"},
+ {L"\u2029purple", "\\u2029purple"},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
diff --git a/chromium/base/linux_util.cc b/chromium/base/linux_util.cc
index d6cd5040320..d94588f98c6 100644
--- a/chromium/base/linux_util.cc
+++ b/chromium/base/linux_util.cc
@@ -37,7 +37,7 @@ class LinuxDistroHelper {
public:
// Retrieves the Singleton.
static LinuxDistroHelper* GetInstance() {
- return Singleton<LinuxDistroHelper>::get();
+ return base::Singleton<LinuxDistroHelper>::get();
}
// The simple state machine goes from:
@@ -106,7 +106,7 @@ std::string GetLinuxDistro() {
argv.push_back("lsb_release");
argv.push_back("-d");
std::string output;
- base::GetAppOutput(CommandLine(argv), &output);
+ GetAppOutput(CommandLine(argv), &output);
if (output.length() > 0) {
// lsb_release -d should return: Description:<tab>Distro Info
const char field[] = "Description:\t";
@@ -124,8 +124,8 @@ std::string GetLinuxDistro() {
void SetLinuxDistro(const std::string& distro) {
std::string trimmed_distro;
- base::TrimWhitespaceASCII(distro, base::TRIM_ALL, &trimmed_distro);
- base::strlcpy(g_linux_distro, trimmed_distro.c_str(), kDistroSize);
+ TrimWhitespaceASCII(distro, TRIM_ALL, &trimmed_distro);
+ strlcpy(g_linux_distro, trimmed_distro.c_str(), kDistroSize);
}
pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index 71528ad3f4e..dc0b8b2472f 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -7,6 +7,8 @@
#if defined(OS_WIN)
#include <io.h>
#include <windows.h>
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
typedef HANDLE FileHandle;
typedef HANDLE MutexHandle;
// Windows warns on using write(). It prefers _write().
@@ -176,6 +178,9 @@ PathString GetDefaultLogFile() {
#endif
}
+// We don't need locks on Windows for atomically appending to files. The OS
+// provides this functionality.
+#if !defined(OS_WIN)
// This class acts as a wrapper for locking the logging files.
// LoggingLock::Init() should be called from the main thread before any logging
// is done. Then whenever logging, be sure to have a local LoggingLock
@@ -196,48 +201,17 @@ class LoggingLock {
if (initialized)
return;
lock_log_file = lock_log;
- if (lock_log_file == LOCK_LOG_FILE) {
-#if defined(OS_WIN)
- if (!log_mutex) {
- std::wstring safe_name;
- if (new_log_file)
- safe_name = new_log_file;
- else
- safe_name = GetDefaultLogFile();
- // \ is not a legal character in mutex names so we replace \ with /
- std::replace(safe_name.begin(), safe_name.end(), '\\', '/');
- std::wstring t(L"Global\\");
- t.append(safe_name);
- log_mutex = ::CreateMutex(nullptr, FALSE, t.c_str());
-
- if (log_mutex == nullptr) {
-#if DEBUG
- // Keep the error code for debugging
- int error = GetLastError(); // NOLINT
- base::debug::BreakDebugger();
-#endif
- // Return nicely without putting initialized to true.
- return;
- }
- }
-#endif
- } else {
+
+ if (lock_log_file != LOCK_LOG_FILE)
log_lock = new base::internal::LockImpl();
- }
+
initialized = true;
}
private:
static void LockLogging() {
if (lock_log_file == LOCK_LOG_FILE) {
-#if defined(OS_WIN)
- ::WaitForSingleObject(log_mutex, INFINITE);
- // WaitForSingleObject could have returned WAIT_ABANDONED. We don't
- // abort the process here. UI tests might be crashy sometimes,
- // and aborting the test binary only makes the problem worse.
- // We also don't use LOG macros because that might lead to an infinite
- // loop. For more info see http://crbug.com/18028.
-#elif defined(OS_POSIX)
+#if defined(OS_POSIX)
pthread_mutex_lock(&log_mutex);
#endif
} else {
@@ -248,9 +222,7 @@ class LoggingLock {
static void UnlockLogging() {
if (lock_log_file == LOCK_LOG_FILE) {
-#if defined(OS_WIN)
- ReleaseMutex(log_mutex);
-#elif defined(OS_POSIX)
+#if defined(OS_POSIX)
pthread_mutex_unlock(&log_mutex);
#endif
} else {
@@ -265,9 +237,7 @@ class LoggingLock {
// When we don't use a lock, we are using a global mutex. We need to do this
// because LockFileEx is not thread safe.
-#if defined(OS_WIN)
- static MutexHandle log_mutex;
-#elif defined(OS_POSIX)
+#if defined(OS_POSIX)
static pthread_mutex_t log_mutex;
#endif
@@ -282,13 +252,12 @@ base::internal::LockImpl* LoggingLock::log_lock = nullptr;
// static
LogLockingState LoggingLock::lock_log_file = LOCK_LOG_FILE;
-#if defined(OS_WIN)
-// static
-MutexHandle LoggingLock::log_mutex = nullptr;
-#elif defined(OS_POSIX)
+#if defined(OS_POSIX)
pthread_mutex_t LoggingLock::log_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
+#endif  // !defined(OS_WIN)
+
// Called by logging functions to ensure that |g_log_file| is initialized
// and can be used for writing. Returns false if the file could not be
// initialized. |g_log_file| will be nullptr in this case.
@@ -304,12 +273,23 @@ bool InitializeLogFileHandle() {
if ((g_logging_destination & LOG_TO_FILE) != 0) {
#if defined(OS_WIN)
- g_log_file = CreateFile(g_log_file_name->c_str(), GENERIC_WRITE,
+ // The FILE_APPEND_DATA access mask ensures that the file is atomically
+ // appended to across accesses from multiple threads.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa364399(v=vs.85).aspx
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+ g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
// try the current directory
- g_log_file = CreateFile(L".\\debug.log", GENERIC_WRITE,
+ base::FilePath file_path;
+ if (!base::GetCurrentDirectory(&file_path))
+ return false;
+
+ *g_log_file_name = file_path.Append(
+ FILE_PATH_LITERAL("debug.log")).value();
+
+ g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
@@ -317,7 +297,6 @@ bool InitializeLogFileHandle() {
return false;
}
}
- SetFilePointer(g_log_file, 0, 0, FILE_END);
#elif defined(OS_POSIX)
g_log_file = fopen(g_log_file_name->c_str(), "a");
if (g_log_file == nullptr)
@@ -380,8 +359,10 @@ bool BaseInitLoggingImpl(const LoggingSettings& settings) {
if ((g_logging_destination & LOG_TO_FILE) == 0)
return true;
+#if !defined(OS_WIN)
LoggingLock::Init(settings.lock_log, settings.log_file);
LoggingLock logging_lock;
+#endif
// Calling InitLogging twice or after some log call has already opened the
// default log file will re-initialize to the new options.
@@ -468,38 +449,8 @@ void DisplayDebugMessageInDialog(const std::string& str) {
return;
#if defined(OS_WIN)
- // For Windows programs, it's possible that the message loop is
- // messed up on a fatal error, and creating a MessageBox will cause
- // that message loop to be run. Instead, we try to spawn another
- // process that displays its command line. We look for "Debug
- // Message.exe" in the same directory as the application. If it
- // exists, we use it, otherwise, we use a regular message box.
- wchar_t prog_name[MAX_PATH];
- GetModuleFileNameW(nullptr, prog_name, MAX_PATH);
- wchar_t* backslash = wcsrchr(prog_name, '\\');
- if (backslash)
- backslash[1] = 0;
- wcscat_s(prog_name, MAX_PATH, L"debug_message.exe");
-
- std::wstring cmdline = base::UTF8ToWide(str);
- if (cmdline.empty())
- return;
-
- STARTUPINFO startup_info;
- memset(&startup_info, 0, sizeof(startup_info));
- startup_info.cb = sizeof(startup_info);
-
- PROCESS_INFORMATION process_info;
- if (CreateProcessW(prog_name, &cmdline[0], nullptr, nullptr, false, 0,
- nullptr, nullptr, &startup_info, &process_info)) {
- WaitForSingleObject(process_info.hProcess, INFINITE);
- CloseHandle(process_info.hThread);
- CloseHandle(process_info.hProcess);
- } else {
- // debug process broken, let's just do a message box
- MessageBoxW(nullptr, &cmdline[0], L"Fatal error",
- MB_OK | MB_ICONHAND | MB_TOPMOST);
- }
+ MessageBoxW(nullptr, base::UTF8ToUTF16(str).c_str(), L"Fatal error",
+ MB_OK | MB_ICONHAND | MB_TOPMOST);
#else
// We intentionally don't implement a dialog on other platforms.
// You can just look at stderr.
@@ -521,6 +472,12 @@ LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
Init(file, line);
}
+LogMessage::LogMessage(const char* file, int line, const char* condition)
+ : severity_(LOG_FATAL), file_(file), line_(line) {
+ Init(file, line);
+ stream_ << "Check failed: " << condition << ". ";
+}
+
LogMessage::LogMessage(const char* file, int line, std::string* result)
: severity_(LOG_FATAL), file_(file), line_(line) {
Init(file, line);
@@ -537,9 +494,9 @@ LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
}
LogMessage::~LogMessage() {
-#if !defined(NDEBUG) && !defined(OS_NACL) && !defined(__UCLIBC__)
- if (severity_ == LOG_FATAL) {
- // Include a stack trace on a fatal.
+#if !defined(OFFICIAL_BUILD) && !defined(OS_NACL) && !defined(__UCLIBC__)
+ if (severity_ == LOG_FATAL && !base::debug::BeingDebugged()) {
+ // Include a stack trace on a fatal, unless a debugger is attached.
base::debug::StackTrace trace;
stream_ << std::endl; // Newline to separate from log message.
trace.OutputToStream(&stream_);
@@ -597,11 +554,12 @@ LogMessage::~LogMessage() {
// to do this at the same time, there will be a race condition to create
// the lock. This is why InitLogging should be called from the main
// thread at the beginning of execution.
+#if !defined(OS_WIN)
LoggingLock::Init(LOCK_LOG_FILE, nullptr);
LoggingLock logging_lock;
+#endif
if (InitializeLogFileHandle()) {
#if defined(OS_WIN)
- SetFilePointer(g_log_file, 0, 0, SEEK_END);
DWORD num_written;
WriteFile(g_log_file,
static_cast<const void*>(str_newline.c_str()),
@@ -633,7 +591,11 @@ LogMessage::~LogMessage() {
// information, and displaying message boxes when the application is
// hosed can cause additional problems.
#ifndef NDEBUG
- DisplayDebugMessageInDialog(stream_.str());
+ if (!base::debug::BeingDebugged()) {
+ // Displaying a dialog is unnecessary when debugging and can complicate
+ // debugging.
+ DisplayDebugMessageInDialog(stream_.str());
+ }
#endif
// Crash the process to generate a dump.
base::debug::BreakDebugger();
@@ -757,7 +719,9 @@ ErrnoLogMessage::~ErrnoLogMessage() {
#endif // defined(OS_WIN)
void CloseLogFile() {
+#if !defined(OS_WIN)
LoggingLock logging_lock;
+#endif
CloseLogFileUnlocked();
}
@@ -796,6 +760,10 @@ void RawLog(int level, const char* message) {
#undef write
#if defined(OS_WIN)
+bool IsLoggingToFileEnabled() {
+ return g_logging_destination & LOG_TO_FILE;
+}
+
std::wstring GetLogFileFullPath() {
if (g_log_file_name)
return *g_log_file_name;
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index ea096d19f72..f49511832bd 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -426,6 +426,21 @@ const LogSeverity LOG_0 = LOG_ERROR;
#define EAT_STREAM_PARAMETERS \
true ? (void) 0 : ::logging::LogMessageVoidify() & LOG_STREAM(FATAL)
+// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
+// boolean.
+class CheckOpResult {
+ public:
+ // |message| must be null if and only if the check failed.
+ CheckOpResult(std::string* message) : message_(message) {}
+ // Returns true if the check succeeded.
+ operator bool() const { return !message_; }
+ // Returns the message.
+ std::string* message() { return message_; }
+
+ private:
+ std::string* message_;
+};
+
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode.
@@ -436,7 +451,7 @@ const LogSeverity LOG_0 = LOG_ERROR;
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && !defined(OS_ANDROID)
// Make all CHECK functions discard their log strings to reduce code
-// bloat for official release builds.
+// bloat for official release builds (except Android).
// TODO(akalin): This would be more valuable if there were some way to
// remove BreakDebugger() from the backtrace, perhaps by turning it
@@ -470,9 +485,10 @@ const LogSeverity LOG_0 = LOG_ERROR;
#else // _PREFAST_
-#define CHECK(condition) \
- LAZY_STREAM(LOG_STREAM(FATAL), !(condition)) \
- << "Check failed: " #condition ". "
+// Do as much work as possible out of line to reduce inline code size.
+#define CHECK(condition) \
+ LAZY_STREAM(logging::LogMessage(__FILE__, __LINE__, #condition).stream(), \
+ !(condition))
#define PCHECK(condition) \
LAZY_STREAM(PLOG_STREAM(FATAL), !(condition)) \
@@ -482,14 +498,18 @@ const LogSeverity LOG_0 = LOG_ERROR;
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
-//
-// TODO(akalin): Rewrite this so that constructs like if (...)
-// CHECK_EQ(...) else { ... } work properly.
-#define CHECK_OP(name, op, val1, val2) \
- if (std::string* _result = \
- logging::Check##name##Impl((val1), (val2), \
- #val1 " " #op " " #val2)) \
- logging::LogMessage(__FILE__, __LINE__, _result).stream()
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+// CHECK_EQ(2, a);
+#define CHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (logging::CheckOpResult true_if_passed = \
+ logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2)) \
+ ; \
+ else \
+ logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
#endif
@@ -665,12 +685,20 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
// Helper macro for binary operators.
// Don't use this macro directly in your code, use DCHECK_EQ et al below.
-#define DCHECK_OP(name, op, val1, val2) \
- if (DCHECK_IS_ON()) \
- if (std::string* _result = logging::Check##name##Impl( \
- (val1), (val2), #val1 " " #op " " #val2)) \
- logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, _result) \
- .stream()
+// The 'switch' is used to prevent the 'else' from being ambiguous when the
+// macro is used in an 'if' clause such as:
+// if (a == 1)
+// DCHECK_EQ(2, a);
+#define DCHECK_OP(name, op, val1, val2) \
+ switch (0) case 0: default: \
+ if (logging::CheckOpResult true_if_passed = \
+ DCHECK_IS_ON() ? \
+ logging::Check##name##Impl((val1), (val2), \
+ #val1 " " #op " " #val2) : nullptr) \
+ ; \
+ else \
+ logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
+ true_if_passed.message()).stream()
// Equality/Inequality checks - compare two values, and log a
// LOG_DCHECK message including the two values when the result is not
@@ -727,6 +755,9 @@ class BASE_EXPORT LogMessage {
// Used for LOG(severity).
LogMessage(const char* file, int line, LogSeverity severity);
+ // Used for CHECK(). Implied severity = LOG_FATAL.
+ LogMessage(const char* file, int line, const char* condition);
+
// Used for CHECK_EQ(), etc. Takes ownership of the given string.
// Implied severity = LOG_FATAL.
LogMessage(const char* file, int line, std::string* result);
@@ -775,7 +806,7 @@ class BASE_EXPORT LogMessage {
// A non-macro interface to the log facility; (useful
// when the logging level is not a compile-time constant).
-inline void LogAtLevel(int const log_level, std::string const &msg) {
+inline void LogAtLevel(int log_level, const std::string& msg) {
LogMessage(__FILE__, __LINE__, log_level).stream() << msg;
}
@@ -861,6 +892,9 @@ BASE_EXPORT void RawLog(int level, const char* message);
} while (0)
#if defined(OS_WIN)
+// Returns true if logging to file is enabled.
+BASE_EXPORT bool IsLoggingToFileEnabled();
+
// Returns the default log file path.
BASE_EXPORT std::wstring GetLogFileFullPath();
#endif
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 8b9701a545f..e0619425f75 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -234,6 +234,30 @@ TEST_F(LoggingTest, DcheckReleaseBehavior) {
DCHECK_EQ(some_variable, 1) << "test";
}
+TEST_F(LoggingTest, DCheckEqStatements) {
+ bool reached = false;
+ if (false)
+ DCHECK_EQ(false, true); // Unreached.
+ else
+ DCHECK_EQ(true, reached = true); // Reached, passed.
+ ASSERT_EQ(DCHECK_IS_ON() ? true : false, reached);
+
+ if (false)
+ DCHECK_EQ(false, true); // Unreached.
+}
+
+TEST_F(LoggingTest, CheckEqStatements) {
+ bool reached = false;
+ if (false)
+ CHECK_EQ(false, true); // Unreached.
+ else
+ CHECK_EQ(true, reached = true); // Reached, passed.
+ ASSERT_TRUE(reached);
+
+ if (false)
+ CHECK_EQ(false, true); // Unreached.
+}
+
// Test that defining an operator<< for a type in a namespace doesn't prevent
// other code in that namespace from calling the operator<<(ostream, wstring)
// defined by logging.h. This can fail if operator<<(ostream, wstring) can't be
diff --git a/chromium/base/logging_win.cc b/chromium/base/logging_win.cc
index 53cc37c4ea3..319ae8a9d38 100644
--- a/chromium/base/logging_win.cc
+++ b/chromium/base/logging_win.cc
@@ -18,8 +18,8 @@ LogEventProvider::LogEventProvider() : old_log_level_(LOG_NONE) {
}
LogEventProvider* LogEventProvider::GetInstance() {
- return Singleton<LogEventProvider,
- StaticMemorySingletonTraits<LogEventProvider> >::get();
+ return base::Singleton<LogEventProvider, base::StaticMemorySingletonTraits<
+ LogEventProvider>>::get();
}
bool LogEventProvider::LogMessage(logging::LogSeverity severity,
diff --git a/chromium/base/logging_win.h b/chromium/base/logging_win.h
index aa48e22e76d..de34a644093 100644
--- a/chromium/base/logging_win.h
+++ b/chromium/base/logging_win.h
@@ -12,8 +12,10 @@
#include "base/win/event_trace_provider.h"
#include "base/logging.h"
+namespace base {
template <typename Type>
struct StaticMemorySingletonTraits;
+} // namespace base
namespace logging {
@@ -71,7 +73,7 @@ class BASE_EXPORT LogEventProvider : public base::win::EtwTraceProvider {
// restored in OnEventsDisabled.
logging::LogSeverity old_log_level_;
- friend struct StaticMemorySingletonTraits<LogEventProvider>;
+ friend struct base::StaticMemorySingletonTraits<LogEventProvider>;
DISALLOW_COPY_AND_ASSIGN(LogEventProvider);
};
diff --git a/chromium/base/mac/foundation_util.h b/chromium/base/mac/foundation_util.h
index 353ed7c6318..6e8505da21b 100644
--- a/chromium/base/mac/foundation_util.h
+++ b/chromium/base/mac/foundation_util.h
@@ -373,6 +373,14 @@ BASE_EXPORT NSString* FilePathToNSString(const FilePath& path);
// Converts |str| to a FilePath. Returns an empty path if |str| is nil.
BASE_EXPORT FilePath NSStringToFilePath(NSString* str);
+#if defined(__OBJC__)
+// Converts |range| to an NSRange, returning the new range in |range_out|.
+// Returns true if conversion was successful, false if the values of |range|
+// could not be converted to NSUIntegers.
+BASE_EXPORT bool CFRangeToNSRange(CFRange range,
+ NSRange* range_out) WARN_UNUSED_RESULT;
+#endif // defined(__OBJC__)
+
} // namespace mac
} // namespace base
diff --git a/chromium/base/mac/foundation_util.mm b/chromium/base/mac/foundation_util.mm
index 27d6e7c4653..bd5d51452dc 100644
--- a/chromium/base/mac/foundation_util.mm
+++ b/chromium/base/mac/foundation_util.mm
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/mac/bundle_locations.h"
#include "base/mac/mac_logging.h"
+#include "base/numerics/safe_conversions.h"
#include "base/strings/sys_string_conversions.h"
#if !defined(OS_IOS)
@@ -430,6 +431,19 @@ FilePath NSStringToFilePath(NSString* str) {
return FilePath([str fileSystemRepresentation]);
}
+bool CFRangeToNSRange(CFRange range, NSRange* range_out) {
+ if (base::IsValueInRangeForNumericType<decltype(range_out->location)>(
+ range.location) &&
+ base::IsValueInRangeForNumericType<decltype(range_out->length)>(
+ range.length) &&
+ base::IsValueInRangeForNumericType<decltype(range_out->location)>(
+ range.location + range.length)) {
+ *range_out = NSMakeRange(range.location, range.length);
+ return true;
+ }
+ return false;
+}
+
} // namespace mac
} // namespace base
diff --git a/chromium/base/mac/foundation_util_unittest.mm b/chromium/base/mac/foundation_util_unittest.mm
index e60a0f6b649..c688442a688 100644
--- a/chromium/base/mac/foundation_util_unittest.mm
+++ b/chromium/base/mac/foundation_util_unittest.mm
@@ -317,6 +317,18 @@ TEST(FoundationUtilTest, NSStringToFilePath) {
EXPECT_EQ(FilePath("/a/b"), NSStringToFilePath(@"/a/b"));
}
+TEST(FoundationUtilTest, CFRangeToNSRange) {
+ NSRange range_out;
+ EXPECT_TRUE(CFRangeToNSRange(CFRangeMake(10, 5), &range_out));
+ EXPECT_EQ(10UL, range_out.location);
+ EXPECT_EQ(5UL, range_out.length);
+ EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, 5), &range_out));
+ EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(5, -1), &range_out));
+ EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(-1, -1), &range_out));
+ EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MAX, LONG_MAX), &range_out));
+ EXPECT_FALSE(CFRangeToNSRange(CFRangeMake(LONG_MIN, LONG_MAX), &range_out));
+}
+
TEST(StringNumberConversionsTest, FormatNSInteger) {
// The PRI[dxu]NS macro assumes that NSInteger is a typedef to "int" on
// 32-bit architecture and a typedef to "long" on 64-bit architecture
diff --git a/chromium/base/mac/mac_util.h b/chromium/base/mac/mac_util.h
index 060f35b6c0c..a452a39bdd4 100644
--- a/chromium/base/mac/mac_util.h
+++ b/chromium/base/mac/mac_util.h
@@ -145,6 +145,7 @@ BASE_EXPORT bool IsOSMavericksOrLater();
// Yosemite is Mac OS X 10.10, Darwin 14.
BASE_EXPORT bool IsOSYosemite();
+BASE_EXPORT bool IsOSYosemiteOrEarlier();
BASE_EXPORT bool IsOSYosemiteOrLater();
// El Capitan is Mac OS X 10.11, Darwin 15.
@@ -154,7 +155,7 @@ BASE_EXPORT bool IsOSElCapitanOrLater();
// This should be infrequently used. It only makes sense to use this to avoid
// codepaths that are very likely to break on future (unreleased, untested,
// unborn) OS releases, or to log when the OS is newer than any known version.
-BASE_EXPORT bool IsOSLaterThanYosemite_DontCallThis();
+BASE_EXPORT bool IsOSLaterThanElCapitan_DontCallThis();
// Inline functions that are redundant due to version ranges being mutually-
// exclusive.
@@ -215,7 +216,6 @@ inline bool IsOSYosemiteOrLater() { return true; }
MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_10
#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_10
inline bool IsOSYosemite() { return false; }
-inline bool IsOSLaterThanYosemite_DontCallThis() { return true; }
#endif
#if defined(MAC_OS_X_VERSION_10_11) && \
@@ -228,6 +228,7 @@ inline bool IsOSElCapitanOrLater() { return true; }
MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
inline bool IsOSElCapitan() { return false; }
+inline bool IsOSLaterThanElCapitan_DontCallThis() { return true; }
#endif
// Retrieve the system's model identifier string from the IOKit registry:
diff --git a/chromium/base/mac/mac_util.mm b/chromium/base/mac/mac_util.mm
index e1e15dc5382..647faf3866f 100644
--- a/chromium/base/mac/mac_util.mm
+++ b/chromium/base/mac/mac_util.mm
@@ -463,7 +463,7 @@ int MacOSXMinorVersionInternal() {
// immediate death.
CHECK(darwin_major_version >= 6);
int mac_os_x_minor_version = darwin_major_version - 4;
- DLOG_IF(WARNING, darwin_major_version > 14) << "Assuming Darwin "
+ DLOG_IF(WARNING, darwin_major_version > 15) << "Assuming Darwin "
<< base::IntToString(darwin_major_version) << " is Mac OS X 10."
<< base::IntToString(mac_os_x_minor_version);
@@ -542,12 +542,6 @@ bool IsOSYosemiteOrLater() {
}
#endif
-#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_10)
-bool IsOSLaterThanYosemite_DontCallThis() {
- return MacOSXMinorVersion() > YOSEMITE_MINOR_VERSION;
-}
-#endif
-
#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11)
bool IsOSElCapitan() {
return MacOSXMinorVersion() == EL_CAPITAN_MINOR_VERSION;
@@ -560,6 +554,12 @@ bool IsOSElCapitanOrLater() {
}
#endif
+#if !defined(BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11)
+bool IsOSLaterThanElCapitan_DontCallThis() {
+ return MacOSXMinorVersion() > EL_CAPITAN_MINOR_VERSION;
+}
+#endif
+
std::string GetModelIdentifier() {
std::string return_string;
ScopedIOObject<io_service_t> platform_expert(
diff --git a/chromium/base/mac/mac_util_unittest.mm b/chromium/base/mac/mac_util_unittest.mm
index 3982ab00dfc..35b6e568f05 100644
--- a/chromium/base/mac/mac_util_unittest.mm
+++ b/chromium/base/mac/mac_util_unittest.mm
@@ -154,8 +154,11 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
EXPECT_TRUE(IsOSMavericksOrEarlier());
EXPECT_FALSE(IsOSMavericksOrLater());
EXPECT_FALSE(IsOSYosemite());
+ EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_FALSE(IsOSYosemiteOrLater());
- EXPECT_FALSE(IsOSLaterThanYosemite_DontCallThis());
+ EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_FALSE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
} else if (minor == 7) {
EXPECT_FALSE(IsOSSnowLeopard());
EXPECT_TRUE(IsOSLion());
@@ -168,8 +171,11 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
EXPECT_TRUE(IsOSMavericksOrEarlier());
EXPECT_FALSE(IsOSMavericksOrLater());
EXPECT_FALSE(IsOSYosemite());
+ EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_FALSE(IsOSYosemiteOrLater());
- EXPECT_FALSE(IsOSLaterThanYosemite_DontCallThis());
+ EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_FALSE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
} else if (minor == 8) {
EXPECT_FALSE(IsOSSnowLeopard());
EXPECT_FALSE(IsOSLion());
@@ -182,8 +188,11 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
EXPECT_TRUE(IsOSMavericksOrEarlier());
EXPECT_FALSE(IsOSMavericksOrLater());
EXPECT_FALSE(IsOSYosemite());
+ EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_FALSE(IsOSYosemiteOrLater());
- EXPECT_FALSE(IsOSLaterThanYosemite_DontCallThis());
+ EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_FALSE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
} else if (minor == 9) {
EXPECT_FALSE(IsOSSnowLeopard());
EXPECT_FALSE(IsOSLion());
@@ -196,8 +205,11 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
EXPECT_TRUE(IsOSMavericksOrEarlier());
EXPECT_TRUE(IsOSMavericksOrLater());
EXPECT_FALSE(IsOSYosemite());
+ EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_FALSE(IsOSYosemiteOrLater());
- EXPECT_FALSE(IsOSLaterThanYosemite_DontCallThis());
+ EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_FALSE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
} else if (minor == 10) {
EXPECT_FALSE(IsOSSnowLeopard());
EXPECT_FALSE(IsOSLion());
@@ -210,10 +222,30 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
EXPECT_FALSE(IsOSMavericksOrEarlier());
EXPECT_TRUE(IsOSMavericksOrLater());
EXPECT_TRUE(IsOSYosemite());
+ EXPECT_TRUE(IsOSYosemiteOrEarlier());
EXPECT_TRUE(IsOSYosemiteOrLater());
- EXPECT_FALSE(IsOSLaterThanYosemite_DontCallThis());
+ EXPECT_FALSE(IsOSElCapitan());
+ EXPECT_FALSE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
+ } else if (minor == 11) {
+ EXPECT_FALSE(IsOSSnowLeopard());
+ EXPECT_FALSE(IsOSLion());
+ EXPECT_FALSE(IsOSLionOrEarlier());
+ EXPECT_TRUE(IsOSLionOrLater());
+ EXPECT_FALSE(IsOSMountainLion());
+ EXPECT_FALSE(IsOSMountainLionOrEarlier());
+ EXPECT_TRUE(IsOSMountainLionOrLater());
+ EXPECT_FALSE(IsOSMavericks());
+ EXPECT_FALSE(IsOSMavericksOrEarlier());
+ EXPECT_TRUE(IsOSMavericksOrLater());
+ EXPECT_FALSE(IsOSYosemite());
+ EXPECT_FALSE(IsOSYosemiteOrEarlier());
+ EXPECT_TRUE(IsOSYosemiteOrLater());
+ EXPECT_TRUE(IsOSElCapitan());
+ EXPECT_TRUE(IsOSElCapitanOrLater());
+ EXPECT_FALSE(IsOSLaterThanElCapitan_DontCallThis());
} else {
- // Not six, seven, eight, nine, or ten. Ah, ah, ah.
+ // Not six, seven, eight, nine, ten, or eleven. Ah, ah, ah.
EXPECT_TRUE(false);
}
} else {
diff --git a/chromium/base/mac/scoped_nsexception_enabler.h b/chromium/base/mac/scoped_nsexception_enabler.h
deleted file mode 100644
index 484dd534496..00000000000
--- a/chromium/base/mac/scoped_nsexception_enabler.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_SCOPED_NSEXCEPTION_ENABLER_H_
-#define BASE_MAC_SCOPED_NSEXCEPTION_ENABLER_H_
-
-#import <Foundation/Foundation.h>
-
-#include "base/base_export.h"
-#include "base/basictypes.h"
-
-namespace base {
-namespace mac {
-
-// BrowserCrApplication attempts to restrict throwing of NSExceptions
-// because they interact badly with C++ scoping rules. Unfortunately,
-// there are some cases where exceptions must be supported, such as
-// when third-party printer drivers are used. These helpers can be
-// used to enable exceptions for narrow windows.
-
-// Make it easy to safely allow NSException to be thrown in a limited
-// scope. Note that if an exception is thrown, then this object will
-// not be appropriately destructed! If the exception ends up in the
-// top-level event loop, things are cleared in -reportException:. If
-// the exception is caught at a lower level, a higher level scoper
-// should eventually reset things.
-class BASE_EXPORT ScopedNSExceptionEnabler {
- public:
- ScopedNSExceptionEnabler();
- ~ScopedNSExceptionEnabler();
-
- private:
- bool was_enabled_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedNSExceptionEnabler);
-};
-
-// Access the exception setting for the current thread. This is for
-// the support code in BrowserCrApplication, other code should use
-// the scoper.
-BASE_EXPORT bool GetNSExceptionsAllowed();
-BASE_EXPORT void SetNSExceptionsAllowed(bool allowed);
-
-// Executes |block| with fatal-exceptions turned off, and returns the
-// result. If an exception is thrown during the perform, nil is
-// returned.
-typedef id (^BlockReturningId)();
-BASE_EXPORT id RunBlockIgnoringExceptions(BlockReturningId block);
-
-} // namespace mac
-} // namespace base
-
-#endif // BASE_MAC_SCOPED_NSEXCEPTION_ENABLER_H_
diff --git a/chromium/base/mac/scoped_nsexception_enabler.mm b/chromium/base/mac/scoped_nsexception_enabler.mm
deleted file mode 100644
index 7b8ad9266b1..00000000000
--- a/chromium/base/mac/scoped_nsexception_enabler.mm
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#import "base/mac/scoped_nsexception_enabler.h"
-
-#import "base/lazy_instance.h"
-#import "base/threading/thread_local.h"
-
-// To make the |g_exceptionsAllowed| declaration readable.
-using base::LazyInstance;
-using base::ThreadLocalBoolean;
-
-// When C++ exceptions are disabled, the C++ library defines |try| and
-// |catch| so as to allow exception-expecting C++ code to build properly when
-// language support for exceptions is not present. These macros interfere
-// with the use of |@try| and |@catch| in Objective-C files such as this one.
-// Undefine these macros here, after everything has been #included, since
-// there will be no C++ uses and only Objective-C uses from this point on.
-#undef try
-#undef catch
-
-namespace {
-
-// Whether to allow NSExceptions to be raised on the current thread.
-LazyInstance<ThreadLocalBoolean>::Leaky
- g_exceptionsAllowed = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
-
-namespace base {
-namespace mac {
-
-bool GetNSExceptionsAllowed() {
- return g_exceptionsAllowed.Get().Get();
-}
-
-void SetNSExceptionsAllowed(bool allowed) {
- return g_exceptionsAllowed.Get().Set(allowed);
-}
-
-id RunBlockIgnoringExceptions(BlockReturningId block) {
- id ret = nil;
- @try {
- base::mac::ScopedNSExceptionEnabler enable;
- ret = block();
- }
- @catch(id exception) {
- }
- return ret;
-}
-
-ScopedNSExceptionEnabler::ScopedNSExceptionEnabler() {
- was_enabled_ = GetNSExceptionsAllowed();
- SetNSExceptionsAllowed(true);
-}
-
-ScopedNSExceptionEnabler::~ScopedNSExceptionEnabler() {
- SetNSExceptionsAllowed(was_enabled_);
-}
-
-} // namespace mac
-} // namespace base
diff --git a/chromium/base/mac/sdk_forward_declarations.h b/chromium/base/mac/sdk_forward_declarations.h
index e45ab43eccb..79d2e09a199 100644
--- a/chromium/base/mac/sdk_forward_declarations.h
+++ b/chromium/base/mac/sdk_forward_declarations.h
@@ -239,6 +239,7 @@ BASE_EXPORT extern NSString* const NSWindowDidExitFullScreenNotification;
BASE_EXPORT extern NSString* const
NSWindowDidChangeBackingPropertiesNotification;
BASE_EXPORT extern NSString* const CBAdvertisementDataServiceDataKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceUUIDsKey;
BASE_EXPORT extern NSString* const
NSPreferredScrollerStyleDidChangeNotification;
#endif // MAC_OS_X_VERSION_10_7
@@ -246,6 +247,7 @@ BASE_EXPORT extern NSString* const
#if !defined(MAC_OS_X_VERSION_10_9) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataOverflowServiceUUIDsKey;
BASE_EXPORT extern NSString* const CBAdvertisementDataIsConnectable;
#endif // MAC_OS_X_VERSION_10_9
@@ -489,6 +491,10 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantDark;
- (NSString*)UUIDString;
@end
+@interface NSViewController (YosemiteSDK)
+- (void)viewDidLoad;
+@end
+
#endif // MAC_OS_X_VERSION_10_10
// ----------------------------------------------------------------------------
diff --git a/chromium/base/mac/sdk_forward_declarations.mm b/chromium/base/mac/sdk_forward_declarations.mm
index 2e4b2d98457..7c5e260ed97 100644
--- a/chromium/base/mac/sdk_forward_declarations.mm
+++ b/chromium/base/mac/sdk_forward_declarations.mm
@@ -23,6 +23,8 @@ NSString* const NSWindowDidChangeBackingPropertiesNotification =
NSString* const CBAdvertisementDataServiceDataKey = @"kCBAdvDataServiceData";
+NSString* const CBAdvertisementDataServiceUUIDsKey = @"kCBAdvDataServiceUUIDs";
+
NSString* const NSPreferredScrollerStyleDidChangeNotification =
@"NSPreferredScrollerStyleDidChangeNotification";
#endif // MAC_OS_X_VERSION_10_7
@@ -32,6 +34,9 @@ NSString* const NSPreferredScrollerStyleDidChangeNotification =
NSString* const NSWindowDidChangeOcclusionStateNotification =
@"NSWindowDidChangeOcclusionStateNotification";
+NSString* const CBAdvertisementDataOverflowServiceUUIDsKey =
+ @"kCBAdvDataOverflowServiceUUIDs";
+
NSString* const CBAdvertisementDataIsConnectable = @"kCBAdvDataIsConnectable";
#endif // MAC_OS_X_VERSION_10_9
diff --git a/chromium/base/macros.h b/chromium/base/macros.h
index 53b3926cd24..c5f503fbc3a 100644
--- a/chromium/base/macros.h
+++ b/chromium/base/macros.h
@@ -55,29 +55,6 @@
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
#define arraysize(array) (sizeof(ArraySizeHelper(array)))
-
-// Use implicit_cast as a safe version of static_cast or const_cast
-// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
-// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
-// a const pointer to Foo).
-// When you use implicit_cast, the compiler checks that the cast is safe.
-// Such explicit implicit_casts are necessary in surprisingly many
-// situations where C++ demands an exact type match instead of an
-// argument type convertible to a target type.
-//
-// The From type can be inferred, so the preferred syntax for using
-// implicit_cast is the same as for static_cast etc.:
-//
-// implicit_cast<ToType>(expr)
-//
-// implicit_cast would have been part of the C++ standard library,
-// but the proposal was submitted too late. It will probably make
-// its way into the language in the future.
-template<typename To, typename From>
-inline To implicit_cast(From const &f) {
- return f;
-}
-
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
diff --git a/chromium/base/memory/BUILD.gn b/chromium/base/memory/BUILD.gn
index 79d99ebba70..20704472ca9 100644
--- a/chromium/base/memory/BUILD.gn
+++ b/chromium/base/memory/BUILD.gn
@@ -37,6 +37,7 @@ source_set("memory") {
"shared_memory_android.cc",
"shared_memory_handle.h",
"shared_memory_handle_mac.cc",
+ "shared_memory_handle_win.cc",
"shared_memory_mac.cc",
"shared_memory_nacl.cc",
"shared_memory_posix.cc",
@@ -46,6 +47,12 @@ source_set("memory") {
"weak_ptr.cc",
"weak_ptr.h",
]
+ if (is_ios) {
+ sources -= [
+ "discardable_shared_memory.cc",
+ "discardable_shared_memory.h",
+ ]
+ }
if (is_nacl) {
sources -= [
diff --git a/chromium/base/memory/discardable_memory.h b/chromium/base/memory/discardable_memory.h
index fc189e74634..c64fc4f7cf3 100644
--- a/chromium/base/memory/discardable_memory.h
+++ b/chromium/base/memory/discardable_memory.h
@@ -11,6 +11,11 @@
namespace base {
+namespace trace_event {
+class MemoryAllocatorDump;
+class ProcessMemoryDump;
+}
+
// Discardable memory is used to cache large objects without worrying about
// blowing out memory, both on mobile devices where there is no swap, and
// desktop devices where unused free memory should be used to help the user
@@ -59,6 +64,14 @@ class BASE_EXPORT DiscardableMemory {
template<typename T> T* data_as() const {
return reinterpret_cast<T*>(data());
}
+
+ // Used for dumping the statistics of discardable memory allocated in tracing.
+ // Returns a new MemoryAllocatorDump in the |pmd| with the size of the
+ // discardable memory. The MemoryAllocatorDump created is owned by |pmd|. See
+ // ProcessMemoryDump::CreateAllocatorDump.
+ virtual trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
+ const char* name,
+ trace_event::ProcessMemoryDump* pmd) const = 0;
};
} // namespace base
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index 4fd15185ebc..d0eaca1a662 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -11,6 +11,7 @@
#include <algorithm>
#include "base/atomicops.h"
+#include "base/bits.h"
#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
@@ -89,15 +90,9 @@ SharedState* SharedStateFromSharedMemory(const SharedMemory& shared_memory) {
return static_cast<SharedState*>(shared_memory.memory());
}
-// Round up |size| to a multiple of alignment, which must be a power of two.
-size_t Align(size_t alignment, size_t size) {
- DCHECK_EQ(alignment & (alignment - 1), 0u);
- return (size + alignment - 1) & ~(alignment - 1);
-}
-
// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
- return Align(base::GetPageSize(), size);
+ return bits::Align(size, base::GetPageSize());
}
} // namespace
@@ -345,6 +340,15 @@ bool DiscardableSharedMemory::IsMemoryResident() const {
!result.GetTimestamp().is_null();
}
+bool DiscardableSharedMemory::IsMemoryLocked() const {
+ DCHECK(shared_memory_.memory());
+
+ SharedState result(subtle::NoBarrier_Load(
+ &SharedStateFromSharedMemory(shared_memory_)->value.i));
+
+ return result.GetLockState() == SharedState::LOCKED;
+}
+
void DiscardableSharedMemory::Close() {
shared_memory_.Close();
}
diff --git a/chromium/base/memory/discardable_shared_memory.h b/chromium/base/memory/discardable_shared_memory.h
index 74bbe8e95f4..5dca884ca82 100644
--- a/chromium/base/memory/discardable_shared_memory.h
+++ b/chromium/base/memory/discardable_shared_memory.h
@@ -107,6 +107,9 @@ class BASE_EXPORT DiscardableSharedMemory {
// Returns true if memory is still resident.
bool IsMemoryResident() const;
+ // Returns true if memory is locked.
+ bool IsMemoryLocked() const;
+
// Closes the open discardable memory segment.
// It is safe to call Close repeatedly.
void Close();
diff --git a/chromium/base/memory/discardable_shared_memory_unittest.cc b/chromium/base/memory/discardable_shared_memory_unittest.cc
index 91b0b68523a..d5b71d31cb0 100644
--- a/chromium/base/memory/discardable_shared_memory_unittest.cc
+++ b/chromium/base/memory/discardable_shared_memory_unittest.cc
@@ -33,6 +33,7 @@ TEST(DiscardableSharedMemoryTest, CreateAndMap) {
bool rv = memory.CreateAndMap(kDataSize);
ASSERT_TRUE(rv);
EXPECT_GE(memory.mapped_size(), kDataSize);
+ EXPECT_TRUE(memory.IsMemoryLocked());
}
TEST(DiscardableSharedMemoryTest, CreateFromHandle) {
@@ -50,6 +51,7 @@ TEST(DiscardableSharedMemoryTest, CreateFromHandle) {
TestDiscardableSharedMemory memory2(shared_handle);
rv = memory2.Map(kDataSize);
ASSERT_TRUE(rv);
+ EXPECT_TRUE(memory2.IsMemoryLocked());
}
TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
@@ -62,6 +64,7 @@ TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
// Memory is initially locked. Unlock it.
memory1.SetNow(Time::FromDoubleT(1));
memory1.Unlock(0, 0);
+ EXPECT_FALSE(memory1.IsMemoryLocked());
// Lock and unlock memory.
auto lock_rv = memory1.Lock(0, 0);
@@ -72,6 +75,7 @@ TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
// Lock again before duplicating and passing ownership to new instance.
lock_rv = memory1.Lock(0, 0);
EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+ EXPECT_TRUE(memory1.IsMemoryLocked());
SharedMemoryHandle shared_handle;
ASSERT_TRUE(
@@ -86,13 +90,18 @@ TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
memory2.SetNow(Time::FromDoubleT(3));
memory2.Unlock(0, 0);
+ // Both memory instances should be unlocked now.
+ EXPECT_FALSE(memory2.IsMemoryLocked());
+ EXPECT_FALSE(memory1.IsMemoryLocked());
+
// Lock second instance before passing ownership back to first instance.
lock_rv = memory2.Lock(0, 0);
EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
- // Memory should still be resident.
+ // Memory should still be resident and locked.
rv = memory1.IsMemoryResident();
EXPECT_TRUE(rv);
+ EXPECT_TRUE(memory1.IsMemoryLocked());
// Unlock first instance.
memory1.SetNow(Time::FromDoubleT(4));
diff --git a/chromium/base/memory/memory_pressure_listener.cc b/chromium/base/memory/memory_pressure_listener.cc
index 2a1be74ebb9..9fd675aa4e5 100644
--- a/chromium/base/memory/memory_pressure_listener.cc
+++ b/chromium/base/memory/memory_pressure_listener.cc
@@ -32,6 +32,10 @@ LazyInstance<
ObserverListThreadSafe<MemoryPressureListener>,
LeakyLazyObserverListTraits> g_observers = LAZY_INSTANCE_INITIALIZER;
+// All memory pressure notifications within this process will be suppressed if
+// this variable is set to 1.
+subtle::Atomic32 g_notifications_suppressed = 0;
+
} // namespace
MemoryPressureListener::MemoryPressureListener(
@@ -54,6 +58,32 @@ void MemoryPressureListener::NotifyMemoryPressure(
DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
TRACE_EVENT1("memory", "MemoryPressureListener::NotifyMemoryPressure",
"level", memory_pressure_level);
+ if (AreNotificationsSuppressed())
+ return;
+ DoNotifyMemoryPressure(memory_pressure_level);
+}
+
+// static
+bool MemoryPressureListener::AreNotificationsSuppressed() {
+ return subtle::Acquire_Load(&g_notifications_suppressed) == 1;
+}
+
+// static
+void MemoryPressureListener::SetNotificationsSuppressed(bool suppress) {
+ subtle::Release_Store(&g_notifications_suppressed, suppress ? 1 : 0);
+}
+
+// static
+void MemoryPressureListener::SimulatePressureNotification(
+ MemoryPressureLevel memory_pressure_level) {
+ // Notify all listeners even if regular pressure notifications are suppressed.
+ DoNotifyMemoryPressure(memory_pressure_level);
+}
+
+// static
+void MemoryPressureListener::DoNotifyMemoryPressure(
+ MemoryPressureLevel memory_pressure_level) {
+ DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
g_observers.Get().Notify(FROM_HERE, &MemoryPressureListener::Notify,
memory_pressure_level);
}
diff --git a/chromium/base/memory/memory_pressure_listener.h b/chromium/base/memory/memory_pressure_listener.h
index 6adaeeed7fd..290657e7260 100644
--- a/chromium/base/memory/memory_pressure_listener.h
+++ b/chromium/base/memory/memory_pressure_listener.h
@@ -72,9 +72,19 @@ class BASE_EXPORT MemoryPressureListener {
// Intended for use by the platform specific implementation.
static void NotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
+ // These methods should not be used anywhere else but in memory measurement
+ // code, where they are intended to maintain stable conditions across
+ // measurements.
+ static bool AreNotificationsSuppressed();
+ static void SetNotificationsSuppressed(bool suppressed);
+ static void SimulatePressureNotification(
+ MemoryPressureLevel memory_pressure_level);
+
private:
void Notify(MemoryPressureLevel memory_pressure_level);
+ static void DoNotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
+
MemoryPressureCallback callback_;
DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
diff --git a/chromium/base/memory/memory_pressure_listener_unittest.cc b/chromium/base/memory/memory_pressure_listener_unittest.cc
new file mode 100644
index 00000000000..38d429d3156
--- /dev/null
+++ b/chromium/base/memory/memory_pressure_listener_unittest.cc
@@ -0,0 +1,78 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_listener.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
+using MemoryPressureLevel = MemoryPressureListener::MemoryPressureLevel;
+
+class MemoryPressureListenerTest : public testing::Test {
+ public:
+ void SetUp() override {
+ message_loop_.reset(new MessageLoopForUI());
+ listener_.reset(new MemoryPressureListener(
+ Bind(&MemoryPressureListenerTest::OnMemoryPressure, Unretained(this))));
+ }
+
+ void TearDown() override {
+ listener_.reset();
+ message_loop_.reset();
+ }
+
+ protected:
+ void ExpectNotification(
+ void (*notification_function)(MemoryPressureLevel),
+ MemoryPressureLevel level) {
+ EXPECT_CALL(*this, OnMemoryPressure(level)).Times(1);
+ notification_function(level);
+ message_loop_->RunUntilIdle();
+ }
+
+ void ExpectNoNotification(
+ void (*notification_function)(MemoryPressureLevel),
+ MemoryPressureLevel level) {
+ EXPECT_CALL(*this, OnMemoryPressure(testing::_)).Times(0);
+ notification_function(level);
+ message_loop_->RunUntilIdle();
+ }
+
+ private:
+ MOCK_METHOD1(OnMemoryPressure,
+ void(MemoryPressureListener::MemoryPressureLevel));
+
+ scoped_ptr<MessageLoopForUI> message_loop_;
+ scoped_ptr<MemoryPressureListener> listener_;
+};
+
+TEST_F(MemoryPressureListenerTest, NotifyMemoryPressure) {
+ // Memory pressure notifications are not suppressed by default.
+ EXPECT_FALSE(MemoryPressureListener::AreNotificationsSuppressed());
+ ExpectNotification(&MemoryPressureListener::NotifyMemoryPressure,
+ MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+ ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+ MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+ // Enable suppressing memory pressure notifications.
+ MemoryPressureListener::SetNotificationsSuppressed(true);
+ EXPECT_TRUE(MemoryPressureListener::AreNotificationsSuppressed());
+ ExpectNoNotification(&MemoryPressureListener::NotifyMemoryPressure,
+ MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+ ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+ MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+ // Disable suppressing memory pressure notifications.
+ MemoryPressureListener::SetNotificationsSuppressed(false);
+ EXPECT_FALSE(MemoryPressureListener::AreNotificationsSuppressed());
+ ExpectNotification(&MemoryPressureListener::NotifyMemoryPressure,
+ MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_CRITICAL);
+ ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+ MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_CRITICAL);
+}
+
+} // namespace base
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.cc b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
index 640e4633fd5..0c3d979b357 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.cc
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
@@ -11,6 +11,7 @@
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "base/single_thread_task_runner.h"
+#include "base/sys_info.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
@@ -114,7 +115,9 @@ MemoryPressureMonitor::MemoryPressureMonitor(
low_mem_file_(HANDLE_EINTR(::open(kLowMemFile, O_RDONLY))),
weak_ptr_factory_(this) {
StartObserving();
- LOG_IF(ERROR, !low_mem_file_.is_valid()) << "Cannot open kernel listener";
+ LOG_IF(ERROR,
+ base::SysInfo::IsRunningOnChromeOS() && !low_mem_file_.is_valid())
+ << "Cannot open kernel listener";
}
MemoryPressureMonitor::~MemoryPressureMonitor() {
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.h b/chromium/base/memory/memory_pressure_monitor_chromeos.h
index ff8992a5631..5529c3b48a3 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.h
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.h
@@ -7,7 +7,6 @@
#include "base/base_export.h"
#include "base/files/scoped_file.h"
-#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/memory_pressure_monitor.h"
@@ -95,7 +94,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
// A periodic timer to check for resource pressure changes. This will get
// replaced by a kernel triggered event system (see crbug.com/381196).
- base::RepeatingTimer<MemoryPressureMonitor> timer_;
+ base::RepeatingTimer timer_;
// To slow down the amount of moderate pressure event calls, this counter
// gets used to count the number of events since the last event occured.
diff --git a/chromium/base/memory/memory_pressure_monitor_win.h b/chromium/base/memory/memory_pressure_monitor_win.h
index 07f04eb893e..030b8b33e28 100644
--- a/chromium/base/memory/memory_pressure_monitor_win.h
+++ b/chromium/base/memory/memory_pressure_monitor_win.h
@@ -117,7 +117,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
int critical_threshold_mb_;
// A periodic timer to check for memory pressure changes.
- base::RepeatingTimer<MemoryPressureMonitor> timer_;
+ base::RepeatingTimer timer_;
// The current memory pressure.
MemoryPressureLevel current_memory_pressure_level_;
diff --git a/chromium/base/memory/scoped_ptr.h b/chromium/base/memory/scoped_ptr.h
index 987ccfa804e..2399e954cc6 100644
--- a/chromium/base/memory/scoped_ptr.h
+++ b/chromium/base/memory/scoped_ptr.h
@@ -228,25 +228,14 @@ class scoped_ptr_impl {
// https://crbug.com/162971
assert(!ShouldAbortOnSelfReset<D>::value || p == nullptr || p != data_.ptr);
- // Note that running data_.ptr = p can lead to undefined behavior if
- // get_deleter()(get()) deletes this. In order to prevent this, reset()
- // should update the stored pointer before deleting its old value.
- //
- // However, changing reset() to use that behavior may cause current code to
- // break in unexpected ways. If the destruction of the owned object
- // dereferences the scoped_ptr when it is destroyed by a call to reset(),
- // then it will incorrectly dispatch calls to |p| rather than the original
- // value of |data_.ptr|.
- //
- // During the transition period, set the stored pointer to nullptr while
- // deleting the object. Eventually, this safety check will be removed to
- // prevent the scenario initially described from occuring and
- // http://crbug.com/176091 can be closed.
+ // Match C++11's definition of unique_ptr::reset(), which requires changing
+ // the pointer before invoking the deleter on the old pointer. This prevents
+ // |this| from being accessed after the deleter is run, which may destroy
+ // |this|.
T* old = data_.ptr;
- data_.ptr = nullptr;
+ data_.ptr = p;
if (old != nullptr)
static_cast<D&>(data_)(old);
- data_.ptr = p;
}
T* get() const { return data_.ptr; }
@@ -465,9 +454,7 @@ class scoped_ptr<T[], D> {
// (C++98 [expr.delete]p3). If you're doing this, fix your code.
// - it cannot be const-qualified differently from T per unique_ptr spec
// (http://cplusplus.github.com/LWG/lwg-active.html#2118). Users wanting
- // to work around this may use implicit_cast<const T*>().
- // However, because of the first bullet in this comment, users MUST
- // NOT use implicit_cast<Base*>() to upcast the static type of the array.
+ // to work around this may use const_cast<const T*>().
explicit scoped_ptr(element_type* array) : impl_(array) {}
// Constructor. Allows construction from a nullptr.
diff --git a/chromium/base/memory/scoped_ptr_unittest.cc b/chromium/base/memory/scoped_ptr_unittest.cc
index 766f4444001..71d995c452e 100644
--- a/chromium/base/memory/scoped_ptr_unittest.cc
+++ b/chromium/base/memory/scoped_ptr_unittest.cc
@@ -693,3 +693,26 @@ TEST(ScopedPtrTest, LoggingDoesntConvertToBoolean) {
EXPECT_EQ(s2.str(), s1.str());
}
+
+TEST(ScopedPtrTest, ReferenceCycle) {
+ struct StructB;
+ struct StructA {
+ scoped_ptr<StructB> b;
+ };
+
+ struct StructB {
+ scoped_ptr<StructA> a;
+ };
+
+ // Create a reference cycle.
+ StructA* a = new StructA;
+ a->b.reset(new StructB);
+ a->b->a.reset(a);
+
+ // Break the cycle by calling reset(). This will cause |a| (and hence, |a.b|)
+ // to be deleted before the call to reset() returns. This tests that the
+ // implementation of scoped_ptr::reset() doesn't access |this| after it
+ // deletes the underlying pointer. This behaviour is consistent with the
+ // definition of unique_ptr::reset in C++11.
+ a->b.reset();
+}
diff --git a/chromium/base/memory/scoped_vector_unittest.cc b/chromium/base/memory/scoped_vector_unittest.cc
index 220cfb04687..4dee9c9004e 100644
--- a/chromium/base/memory/scoped_vector_unittest.cc
+++ b/chromium/base/memory/scoped_vector_unittest.cc
@@ -24,7 +24,8 @@ class LifeCycleObject {
};
~LifeCycleObject() {
- observer_->OnLifeCycleDestroy(this);
+ if (observer_)
+ observer_->OnLifeCycleDestroy(this);
}
private:
@@ -35,6 +36,10 @@ class LifeCycleObject {
observer_->OnLifeCycleConstruct(this);
}
+ void DisconnectObserver() {
+ observer_ = nullptr;
+ }
+
Observer* observer_;
DISALLOW_COPY_AND_ASSIGN(LifeCycleObject);
@@ -62,7 +67,13 @@ enum LifeCycleState {
class LifeCycleWatcher : public LifeCycleObject::Observer {
public:
LifeCycleWatcher() : life_cycle_state_(LC_INITIAL) {}
- ~LifeCycleWatcher() override {}
+ ~LifeCycleWatcher() override {
+ // Stop watching the watched object. Without this, the object's destructor
+ // will call into OnLifeCycleDestroy when destructed, which happens after
+ // this destructor has finished running.
+ if (constructed_life_cycle_object_)
+ constructed_life_cycle_object_->DisconnectObserver();
+ }
// Assert INITIAL -> CONSTRUCTED and no LifeCycleObject associated with this
// LifeCycleWatcher.
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index 4326758177a..9de93d519d9 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -33,27 +33,34 @@ class FilePath;
// Options for creating a shared memory object.
struct SharedMemoryCreateOptions {
SharedMemoryCreateOptions()
- : name_deprecated(NULL),
- size(0),
- open_existing_deprecated(false),
+ : size(0),
executable(false),
- share_read_only(false) {}
+ share_read_only(false) {
+#if !defined(OS_MACOSX) || defined(OS_IOS)
+ name_deprecated = nullptr;
+ open_existing_deprecated = false;
+#endif
+ }
+#if !defined(OS_MACOSX) || defined(OS_IOS)
// DEPRECATED (crbug.com/345734):
// If NULL, the object is anonymous. This pointer is owned by the caller
// and must live through the call to Create().
const std::string* name_deprecated;
+#endif
// Size of the shared memory object to be created.
// When opening an existing object, this has no effect.
size_t size;
+#if !defined(OS_MACOSX) || defined(OS_IOS)
// DEPRECATED (crbug.com/345734):
// If true, and the shared memory already exists, Create() will open the
// existing shared memory and ignore the size parameter. If false,
// shared memory must not exist. This flag is meaningless unless
// name_deprecated is non-NULL.
bool open_existing_deprecated;
+#endif
// If true, mappings might need to be made executable later.
bool executable;
@@ -117,9 +124,11 @@ class BASE_EXPORT SharedMemory {
#endif
#if defined(OS_POSIX) && !defined(OS_ANDROID)
- // Returns the size of the shared memory region referred to by |handle|.
- // Returns '-1' on a failure to determine the size.
- static int GetSizeFromSharedMemoryHandle(const SharedMemoryHandle& handle);
+ // Gets the size of the shared memory region referred to by |handle|.
+ // Returns false on a failure to determine the size. On success, populates the
+ // output variable |size|.
+ static bool GetSizeFromSharedMemoryHandle(const SharedMemoryHandle& handle,
+ size_t* size);
#endif // defined(OS_POSIX) && !defined(OS_ANDROID)
// Creates a shared memory object as described by the options struct.
@@ -138,6 +147,7 @@ class BASE_EXPORT SharedMemory {
return Create(options);
}
+#if !defined(OS_MACOSX) || defined(OS_IOS)
// DEPRECATED (crbug.com/345734):
// Creates or opens a shared memory segment based on a name.
// If open_existing is true, and the shared memory already exists,
@@ -162,6 +172,7 @@ class BASE_EXPORT SharedMemory {
// If read_only is true, opens for read-only access.
// Returns true on success, false on failure.
bool Open(const std::string& name, bool read_only);
+#endif // !defined(OS_MACOSX) || defined(OS_IOS)
// Maps the shared memory into the caller's address space.
// Returns true on success, false otherwise. The memory address
@@ -191,7 +202,7 @@ class BASE_EXPORT SharedMemory {
// Gets a pointer to the opened memory space if it has been
// Mapped via Map(). Returns NULL if it is not mapped.
- void *memory() const { return memory_; }
+ void* memory() const { return memory_; }
// Returns the underlying OS handle for this segment.
// Use of this handle for anything other than an opaque
@@ -254,7 +265,9 @@ class BASE_EXPORT SharedMemory {
private:
#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
+#endif
#endif // defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
enum ShareMode {
SHARE_READONLY,
@@ -268,6 +281,15 @@ class BASE_EXPORT SharedMemory {
#if defined(OS_WIN)
std::wstring name_;
HANDLE mapped_file_;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+ // The OS primitive that backs the shared memory region.
+ SharedMemoryHandle shm_;
+
+ // The mechanism by which the memory is mapped. Only valid if |memory_| is not
+ // |nullptr|.
+ SharedMemoryHandle::Type mapped_memory_mechanism_;
+
+ int readonly_mapped_file_;
#elif defined(OS_POSIX)
int mapped_file_;
int readonly_mapped_file_;
diff --git a/chromium/base/memory/shared_memory_handle.h b/chromium/base/memory/shared_memory_handle.h
index 7af8729976d..43950a253ad 100644
--- a/chromium/base/memory/shared_memory_handle.h
+++ b/chromium/base/memory/shared_memory_handle.h
@@ -9,11 +9,14 @@
#if defined(OS_WIN)
#include <windows.h>
+#include "base/process/process_handle.h"
#elif defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
#include <sys/types.h>
#include "base/base_export.h"
#include "base/file_descriptor_posix.h"
#include "base/macros.h"
+#include "base/process/process_handle.h"
#elif defined(OS_POSIX)
#include <sys/types.h>
#include "base/file_descriptor_posix.h"
@@ -25,18 +28,57 @@ class Pickle;
// SharedMemoryHandle is a platform specific type which represents
// the underlying OS handle to a shared memory segment.
-#if defined(OS_WIN)
-typedef HANDLE SharedMemoryHandle;
-#elif defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
typedef FileDescriptor SharedMemoryHandle;
+#elif defined(OS_WIN)
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+ // The default constructor returns an invalid SharedMemoryHandle.
+ SharedMemoryHandle();
+ SharedMemoryHandle(HANDLE h, base::ProcessId pid);
+
+ // Standard copy constructor. The new instance shares the underlying OS
+ // primitives.
+ SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+ // Standard assignment operator. The updated instance shares the underlying
+ // OS primitives.
+ SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+ // Comparison operators.
+ bool operator==(const SharedMemoryHandle& handle) const;
+ bool operator!=(const SharedMemoryHandle& handle) const;
+
+ // Closes the underlying OS resources.
+ void Close() const;
+
+ // Whether the underlying OS primitive is valid.
+ bool IsValid() const;
+
+ // Whether |pid_| is the same as the current process's id.
+ bool BelongsToCurrentProcess() const;
+
+ // Whether handle_ needs to be duplicated into the destination process when
+ // an instance of this class is passed over a Chrome IPC channel.
+ bool NeedsBrokering() const;
+
+ HANDLE GetHandle() const;
+ base::ProcessId GetPID() const;
+
+ private:
+ HANDLE handle_;
+
+ // The process in which |handle_| is valid and can be used. If |handle_| is
+ // invalid, this will be kNullProcessId.
+ base::ProcessId pid_;
+};
#else
class BASE_EXPORT SharedMemoryHandle {
public:
enum Type {
- // Indicates that the SharedMemoryHandle is backed by a POSIX fd.
+ // The SharedMemoryHandle is backed by a POSIX fd.
POSIX,
- // Indicates that the SharedMemoryHandle is backed by the Mach primitive
- // "memory object".
+ // The SharedMemoryHandle is backed by the Mach primitive "memory object".
MACH,
};
@@ -56,6 +98,16 @@ class BASE_EXPORT SharedMemoryHandle {
explicit SharedMemoryHandle(const base::FileDescriptor& file_descriptor);
SharedMemoryHandle(int fd, bool auto_close);
+ // Makes a Mach-based SharedMemoryHandle of the given size. On error,
+ // subsequent calls to IsValid() return false.
+ explicit SharedMemoryHandle(mach_vm_size_t size);
+
+ // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
+ // in the task with process id |pid|. The memory region has size |size|.
+ SharedMemoryHandle(mach_port_t memory_object,
+ mach_vm_size_t size,
+ base::ProcessId pid);
+
// Standard copy constructor. The new instance shares the underlying OS
// primitives.
SharedMemoryHandle(const SharedMemoryHandle& handle);
@@ -74,7 +126,8 @@ class BASE_EXPORT SharedMemoryHandle {
// Returns the type.
Type GetType() const;
- // Whether the underlying OS primitive is valid.
+ // Whether the underlying OS primitive is valid. Once the SharedMemoryHandle
+ // is backed by a valid OS primitive, it becomes immutable.
bool IsValid() const;
// Sets the POSIX fd backing the SharedMemoryHandle. Requires that the
@@ -86,9 +139,46 @@ class BASE_EXPORT SharedMemoryHandle {
// uses of this method.
const FileDescriptor GetFileDescriptor() const;
+ // Exposed so that the SharedMemoryHandle can be transported between
+ // processes.
+ mach_port_t GetMemoryObject() const;
+
+ // Returns false on a failure to determine the size. On success, populates the
+ // output variable |size|.
+ bool GetSize(size_t* size) const;
+
+ // The SharedMemoryHandle must be valid.
+ // Returns whether the SharedMemoryHandle was successfully mapped into memory.
+ // On success, |memory| is an output variable that contains the start of the
+ // mapped memory.
+ bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
+
+ // Closes the underlying OS primitive.
+ void Close() const;
+
private:
+ // Shared code between copy constructor and operator=.
+ void CopyRelevantData(const SharedMemoryHandle& handle);
+
Type type_;
- FileDescriptor file_descriptor_;
+
+ // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
+ // mach port. |type_| determines the backing member.
+ union {
+ FileDescriptor file_descriptor_;
+
+ struct {
+ mach_port_t memory_object_;
+
+ // The size of the shared memory region when |type_| is MACH. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ mach_vm_size_t size_;
+
+ // The pid of the process in which |memory_object_| is usable. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ base::ProcessId pid_;
+ };
+ };
};
#endif
diff --git a/chromium/base/memory/shared_memory_handle_mac.cc b/chromium/base/memory/shared_memory_handle_mac.cc
index eb4f4654a93..13af82dbd95 100644
--- a/chromium/base/memory/shared_memory_handle_mac.cc
+++ b/chromium/base/memory/shared_memory_handle_mac.cc
@@ -4,11 +4,12 @@
#include "base/memory/shared_memory_handle.h"
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
#include <unistd.h>
#include "base/posix/eintr_wrapper.h"
-#if defined(OS_MACOSX) && !defined(OS_IOS)
namespace base {
static_assert(sizeof(SharedMemoryHandle::Type) <=
@@ -16,20 +17,43 @@ static_assert(sizeof(SharedMemoryHandle::Type) <=
"Size of enum SharedMemoryHandle::Type exceeds size of type "
"transmitted over wire.");
-SharedMemoryHandle::SharedMemoryHandle() : type_(POSIX), file_descriptor_() {
-}
+SharedMemoryHandle::SharedMemoryHandle() : type_(POSIX), file_descriptor_() {}
SharedMemoryHandle::SharedMemoryHandle(
const base::FileDescriptor& file_descriptor)
- : type_(POSIX), file_descriptor_(file_descriptor) {
-}
+ : type_(POSIX), file_descriptor_(file_descriptor) {}
SharedMemoryHandle::SharedMemoryHandle(int fd, bool auto_close)
- : type_(POSIX), file_descriptor_(fd, auto_close) {
+ : type_(POSIX), file_descriptor_(fd, auto_close) {}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
+ type_ = MACH;
+ mach_port_t named_right;
+ kern_return_t kr = mach_make_memory_entry_64(
+ mach_task_self(),
+ &size,
+ 0, // Address.
+ MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+ &named_right,
+ MACH_PORT_NULL); // Parent handle.
+ if (kr != KERN_SUCCESS) {
+ memory_object_ = MACH_PORT_NULL;
+ return;
+ }
+
+ memory_object_ = named_right;
+ size_ = size;
+ pid_ = GetCurrentProcId();
}
+SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
+ mach_vm_size_t size,
+ base::ProcessId pid)
+ : type_(MACH), memory_object_(memory_object), size_(size), pid_(pid) {}
+
SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle)
- : type_(handle.type_), file_descriptor_(handle.file_descriptor_) {
+ : type_(handle.type_) {
+ CopyRelevantData(handle);
}
SharedMemoryHandle& SharedMemoryHandle::operator=(
@@ -38,16 +62,48 @@ SharedMemoryHandle& SharedMemoryHandle::operator=(
return *this;
type_ = handle.type_;
- file_descriptor_ = handle.file_descriptor_;
+ CopyRelevantData(handle);
return *this;
}
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+ switch (type_) {
+ case POSIX: {
+ if (!IsValid())
+ return SharedMemoryHandle();
+
+ int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
+ if (duped_fd < 0)
+ return SharedMemoryHandle();
+ return SharedMemoryHandle(duped_fd, true);
+ }
+ case MACH: {
+ if (!IsValid())
+ return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
+
+ // Increment the ref count.
+ kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+ MACH_PORT_RIGHT_SEND, 1);
+ DCHECK_EQ(kr, KERN_SUCCESS);
+ return SharedMemoryHandle(*this);
+ }
+ }
+}
+
bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
- // Invalid handles are always equal, even if they have different types.
if (!IsValid() && !handle.IsValid())
return true;
- return type_ == handle.type_ && file_descriptor_ == handle.file_descriptor_;
+ if (type_ != handle.type_)
+ return false;
+
+ switch (type_) {
+ case POSIX:
+ return file_descriptor_ == handle.file_descriptor_;
+ case MACH:
+ return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+ pid_ == handle.pid_;
+ }
}
bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
@@ -63,14 +119,15 @@ bool SharedMemoryHandle::IsValid() const {
case POSIX:
return file_descriptor_.fd >= 0;
case MACH:
- return false;
+ return memory_object_ != MACH_PORT_NULL;
}
}
void SharedMemoryHandle::SetFileHandle(int fd, bool auto_close) {
- DCHECK_EQ(type_, POSIX);
+ DCHECK(!IsValid());
file_descriptor_.fd = fd;
file_descriptor_.auto_close = auto_close;
+ type_ = POSIX;
}
const FileDescriptor SharedMemoryHandle::GetFileDescriptor() const {
@@ -78,13 +135,87 @@ const FileDescriptor SharedMemoryHandle::GetFileDescriptor() const {
return file_descriptor_;
}
-SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- DCHECK_EQ(type_, POSIX);
- int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
- if (duped_handle < 0)
- return SharedMemoryHandle();
- return SharedMemoryHandle(duped_handle, true);
+mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+ DCHECK_EQ(type_, MACH);
+ return memory_object_;
+}
+
+bool SharedMemoryHandle::GetSize(size_t* size) const {
+ if (!IsValid())
+ return false;
+
+ switch (type_) {
+ case SharedMemoryHandle::POSIX:
+ struct stat st;
+ if (fstat(file_descriptor_.fd, &st) != 0)
+ return false;
+ if (st.st_size < 0)
+ return false;
+ *size = st.st_size;
+ return true;
+ case SharedMemoryHandle::MACH:
+ *size = size_;
+ return true;
+ }
+}
+
+bool SharedMemoryHandle::MapAt(off_t offset,
+ size_t bytes,
+ void** memory,
+ bool read_only) {
+ DCHECK(IsValid());
+ switch (type_) {
+ case SharedMemoryHandle::POSIX:
+ *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
+ MAP_SHARED, file_descriptor_.fd, offset);
+
+ return *memory && *memory != reinterpret_cast<void*>(-1);
+ case SharedMemoryHandle::MACH:
+ DCHECK_EQ(pid_, GetCurrentProcId());
+ kern_return_t kr = mach_vm_map(
+ mach_task_self(),
+ reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
+ bytes,
+ 0, // Alignment mask
+ VM_FLAGS_ANYWHERE,
+ memory_object_,
+ offset,
+ FALSE, // Copy
+ VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
+ VM_PROT_READ | VM_PROT_WRITE, // Maximum protection
+ VM_INHERIT_NONE);
+ return kr == KERN_SUCCESS;
+ }
+}
+
+void SharedMemoryHandle::Close() const {
+ if (!IsValid())
+ return;
+
+ switch (type_) {
+ case POSIX:
+ if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+ DPLOG(ERROR) << "Error closing fd.";
+ break;
+ case MACH:
+ kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+ if (kr != KERN_SUCCESS)
+ DPLOG(ERROR) << "Error deallocating mach port: " << kr;
+ break;
+ }
+}
+
+void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
+ switch (type_) {
+ case POSIX:
+ file_descriptor_ = handle.file_descriptor_;
+ break;
+ case MACH:
+ memory_object_ = handle.memory_object_;
+ size_ = handle.size_;
+ pid_ = handle.pid_;
+ break;
+ }
}
} // namespace base
-#endif // defined(OS_MACOSX) && !defined(OS_IOS)
diff --git a/chromium/base/memory/shared_memory_handle_win.cc b/chromium/base/memory/shared_memory_handle_win.cc
new file mode 100644
index 00000000000..571e4ef1085
--- /dev/null
+++ b/chromium/base/memory/shared_memory_handle_win.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle()
+ : handle_(nullptr), pid_(kNullProcessId) {}
+
+SharedMemoryHandle::SharedMemoryHandle(HANDLE h, base::ProcessId pid)
+ : handle_(h), pid_(pid) {}
+
+SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle)
+ : handle_(handle.handle_), pid_(handle.pid_) {}
+
+SharedMemoryHandle& SharedMemoryHandle::operator=(
+ const SharedMemoryHandle& handle) {
+ if (this == &handle)
+ return *this;
+
+ handle_ = handle.handle_;
+ pid_ = handle.pid_;
+ return *this;
+}
+
+bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
+ // Invalid handles are always equal.
+ if (!IsValid() && !handle.IsValid())
+ return true;
+
+ return handle_ == handle.handle_ && pid_ == handle.pid_;
+}
+
+bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
+ return !(*this == handle);
+}
+
+void SharedMemoryHandle::Close() const {
+ DCHECK(handle_ != nullptr);
+ DCHECK(BelongsToCurrentProcess());
+ ::CloseHandle(handle_);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+ return handle_ != nullptr;
+}
+
+bool SharedMemoryHandle::BelongsToCurrentProcess() const {
+ return pid_ == base::GetCurrentProcId();
+}
+
+bool SharedMemoryHandle::NeedsBrokering() const {
+ return false;
+}
+
+HANDLE SharedMemoryHandle::GetHandle() const {
+ return handle_;
+}
+
+base::ProcessId SharedMemoryHandle::GetPID() const {
+ return pid_;
+}
+
+} // namespace base
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index 0ec8b8544fa..084b7256fb4 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -5,6 +5,7 @@
#include "base/memory/shared_memory.h"
#include <fcntl.h>
+#include <mach/mach_vm.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -53,8 +54,6 @@ bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
ScopedFILE* fp,
ScopedFD* readonly_fd,
FilePath* path) {
- // It doesn't make sense to have a open-existing private piece of shmem
- DCHECK(!options.open_existing_deprecated);
// Q: Why not use the shm_open() etc. APIs?
// A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
FilePath directory;
@@ -65,7 +64,7 @@ bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
tracked_objects::ScopedTracker tracking_profile(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
"466437 SharedMemory::Create::OpenTemporaryFile"));
- fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+ fp->reset(CreateAndOpenTemporaryFileInDir(directory, path));
// Deleting the file prevents anyone else from mapping it in (making it
// private), and prevents the need for cleanup (once the last fd is
@@ -92,30 +91,30 @@ bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
}
return true;
}
-}
+
+} // namespace
SharedMemory::SharedMemory()
- : mapped_file_(-1),
+ : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
readonly_mapped_file_(-1),
mapped_size_(0),
memory_(NULL),
read_only_(false),
- requested_size_(0) {
-}
+ requested_size_(0) {}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : mapped_file_(GetFdFromSharedMemoryHandle(handle)),
+ : shm_(handle),
+ mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
readonly_mapped_file_(-1),
mapped_size_(0),
memory_(NULL),
read_only_(read_only),
- requested_size_(0) {
-}
+ requested_size_(0) {}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle,
bool read_only,
ProcessHandle process)
- : mapped_file_(GetFdFromSharedMemoryHandle(handle)),
+ : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
readonly_mapped_file_(-1),
mapped_size_(0),
memory_(NULL),
@@ -143,14 +142,12 @@ SharedMemoryHandle SharedMemory::NULLHandle() {
// static
void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- DCHECK_GE(GetFdFromSharedMemoryHandle(handle), 0);
- if (close(GetFdFromSharedMemoryHandle(handle)) < 0)
- DPLOG(ERROR) << "close";
+ handle.Close();
}
// static
size_t SharedMemory::GetHandleLimit() {
- return base::GetMaxFds();
+ return GetMaxFds();
}
// static
@@ -170,27 +167,21 @@ bool SharedMemory::CreateAndMapAnonymous(size_t size) {
}
// static
-int SharedMemory::GetSizeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle) {
- struct stat st;
- if (fstat(GetFdFromSharedMemoryHandle(handle), &st) != 0)
- return -1;
- return st.st_size;
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t* size) {
+ return handle.GetSize(size);
}
// Chromium mostly only uses the unique/private shmem as specified by
// "name == L"". The exception is in the StatsTable.
-// TODO(jrg): there is no way to "clean up" all unused named shmem if
-// we restart from a crash. (That isn't a new problem, but it is a problem.)
-// In case we want to delete it later, it may be useful to save the value
-// of mem_filename after FilePathForMemoryName().
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
// is fixed.
tracked_objects::ScopedTracker tracking_profile1(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
"466437 SharedMemory::Create::Start"));
- DCHECK_EQ(-1, mapped_file_);
+ DCHECK(!shm_.IsValid());
if (options.size == 0) return false;
if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
@@ -202,177 +193,94 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
base::ThreadRestrictions::ScopedAllowIO allow_io;
ScopedFILE fp;
- bool fix_size = true;
ScopedFD readonly_fd;
FilePath path;
- if (options.name_deprecated == NULL || options.name_deprecated->empty()) {
- bool result =
- CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
- if (!result)
- return false;
- } else {
- if (!FilePathForMemoryName(*options.name_deprecated, &path))
- return false;
-
- // Make sure that the file is opened without any permission
- // to other users on the system.
- const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
-
- // First, try to create the file.
- int fd = HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly));
- if (fd == -1 && options.open_existing_deprecated) {
- // If this doesn't work, try and open an existing file in append mode.
- // Opening an existing file in a world writable directory has two main
- // security implications:
- // - Attackers could plant a file under their control, so ownership of
- // the file is checked below.
- // - Attackers could plant a symbolic link so that an unexpected file
- // is opened, so O_NOFOLLOW is passed to open().
- fd = HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW));
-
- // Check that the current user owns the file.
- // If uid != euid, then a more complex permission model is used and this
- // API is not appropriate.
- const uid_t real_uid = getuid();
- const uid_t effective_uid = geteuid();
- struct stat sb;
- if (fd >= 0 &&
- (fstat(fd, &sb) != 0 || sb.st_uid != real_uid ||
- sb.st_uid != effective_uid)) {
- LOG(ERROR) <<
- "Invalid owner when opening existing shared memory file.";
- close(fd);
- return false;
- }
-
- // An existing file was opened, so its size should not be fixed.
- fix_size = false;
- }
+ bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+ if (!result)
+ return false;
- if (options.share_read_only) {
- // Also open as readonly so that we can ShareReadOnlyToProcess.
- readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
- if (!readonly_fd.is_valid()) {
- DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
- close(fd);
- fd = -1;
- return false;
- }
- }
- if (fd >= 0) {
- // "a+" is always appropriate: if it's a new file, a+ is similar to w+.
- fp.reset(fdopen(fd, "a+"));
- }
- }
- if (fp && fix_size) {
- // Get current size.
- struct stat stat;
- if (fstat(fileno(fp.get()), &stat) != 0)
- return false;
- const size_t current_size = stat.st_size;
- if (current_size != options.size) {
- if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
- return false;
- }
- requested_size_ = options.size;
- }
- if (fp == NULL) {
+ if (!fp) {
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
return false;
}
- return PrepareMapFile(fp.Pass(), readonly_fd.Pass());
-}
-
-// Our current implementation of shmem is with mmap()ing of files.
-// These files need to be deleted explicitly.
-// In practice this call is only needed for unit tests.
-bool SharedMemory::Delete(const std::string& name) {
- FilePath path;
- if (!FilePathForMemoryName(name, &path))
- return false;
-
- if (PathExists(path))
- return base::DeleteFile(path, false);
-
- // Doesn't exist, so success.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- FilePath path;
- if (!FilePathForMemoryName(name, &path))
- return false;
-
- read_only_ = read_only;
-
- const char *mode = read_only ? "r" : "r+";
- ScopedFILE fp(base::OpenFile(path, mode));
- ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
- if (!readonly_fd.is_valid()) {
- DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+ // Get current size.
+ struct stat stat;
+ if (fstat(fileno(fp.get()), &stat) != 0)
return false;
+ const size_t current_size = stat.st_size;
+ if (current_size != options.size) {
+ if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+ return false;
}
+ requested_size_ = options.size;
+
return PrepareMapFile(fp.Pass(), readonly_fd.Pass());
}
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (mapped_file_ == -1)
+ if (!shm_.IsValid())
return false;
-
if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
return false;
-
if (memory_)
return false;
- memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
- MAP_SHARED, mapped_file_, offset);
-
- bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
- if (mmap_succeeded) {
+ bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
+ if (success) {
mapped_size_ = bytes;
DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ mapped_memory_mechanism_ = shm_.GetType();
} else {
memory_ = NULL;
}
- return mmap_succeeded;
+ return success;
}
bool SharedMemory::Unmap() {
if (memory_ == NULL)
return false;
- munmap(memory_, mapped_size_);
- memory_ = NULL;
- mapped_size_ = 0;
- return true;
+ switch (mapped_memory_mechanism_) {
+ case SharedMemoryHandle::POSIX:
+ munmap(memory_, mapped_size_);
+ memory_ = NULL;
+ mapped_size_ = 0;
+ return true;
+ case SharedMemoryHandle::MACH:
+ mach_vm_deallocate(mach_task_self(),
+ reinterpret_cast<mach_vm_address_t>(memory_),
+ mapped_size_);
+ return true;
+ }
}
SharedMemoryHandle SharedMemory::handle() const {
- return SharedMemoryHandle(mapped_file_, false);
+ switch (shm_.GetType()) {
+ case SharedMemoryHandle::POSIX:
+ return SharedMemoryHandle(shm_.GetFileDescriptor().fd, false);
+ case SharedMemoryHandle::MACH:
+ return shm_;
+ }
}
void SharedMemory::Close() {
- if (mapped_file_ > 0) {
- if (close(mapped_file_) < 0)
- PLOG(ERROR) << "close";
- mapped_file_ = -1;
- }
- if (readonly_mapped_file_ > 0) {
- if (close(readonly_mapped_file_) < 0)
- PLOG(ERROR) << "close";
- readonly_mapped_file_ = -1;
+ shm_.Close();
+ shm_ = SharedMemoryHandle();
+ if (shm_.GetType() == SharedMemoryHandle::POSIX) {
+ if (readonly_mapped_file_ > 0) {
+ if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+ PLOG(ERROR) << "close";
+ readonly_mapped_file_ = -1;
+ }
}
}
bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
- DCHECK_EQ(-1, mapped_file_);
+ DCHECK(!shm_.IsValid());
DCHECK_EQ(-1, readonly_mapped_file_);
if (fp == NULL)
return false;
@@ -395,8 +303,8 @@ bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
}
}
- mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
- if (mapped_file_ == -1) {
+ int mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
+ if (mapped_file == -1) {
if (errno == EMFILE) {
LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
return false;
@@ -404,38 +312,21 @@ bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
NOTREACHED() << "Call to dup failed, errno=" << errno;
}
}
+ shm_ = SharedMemoryHandle(mapped_file, false);
readonly_mapped_file_ = readonly_fd.release();
return true;
}
-// For the given shmem named |mem_name|, return a filename to mmap()
-// (and possibly create). Modifies |filename|. Return false on
-// error, or true of we are happy.
-bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
- FilePath* path) {
- // mem_name will be used for a filename; make sure it doesn't
- // contain anything which will confuse us.
- DCHECK_EQ(std::string::npos, mem_name.find('/'));
- DCHECK_EQ(std::string::npos, mem_name.find('\0'));
-
- FilePath temp_dir;
- if (!GetShmemTempDir(false, &temp_dir))
- return false;
-
- std::string name_base = std::string(base::mac::BaseBundleID());
- *path = temp_dir.AppendASCII(name_base + ".shmem." + mem_name);
- return true;
-}
-
bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
SharedMemoryHandle* new_handle,
bool close_self,
ShareMode share_mode) {
+ DCHECK_NE(shm_.GetType(), SharedMemoryHandle::MACH);
int handle_to_dup = -1;
- switch(share_mode) {
+ switch (share_mode) {
case SHARE_CURRENT_MODE:
- handle_to_dup = mapped_file_;
+ handle_to_dup = shm_.GetFileDescriptor().fd;
break;
case SHARE_READONLY:
// We could imagine re-opening the file from /dev/fd, but that can't make
diff --git a/chromium/base/memory/shared_memory_mac_unittest.cc b/chromium/base/memory/shared_memory_mac_unittest.cc
new file mode 100644
index 00000000000..5e03670f963
--- /dev/null
+++ b/chromium/base/memory/shared_memory_mac_unittest.cc
@@ -0,0 +1,323 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <servers/bootstrap.h>
+
+#include "base/command_line.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+static const std::string g_service_switch_name = "service_name";
+
+// Structs used to pass a mach port from client to server.
+struct MachSendPortMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t data;
+};
+struct MachReceivePortMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t data;
+ mach_msg_trailer_t trailer;
+};
+
+// Makes the current process into a Mach Server with the given |service_name|.
+mach_port_t BecomeMachServer(const char* service_name) {
+ mach_port_t port;
+ kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
+ return port;
+}
+
+// Returns the mach port for the Mach Server with the given |service_name|.
+mach_port_t LookupServer(const char* service_name) {
+ mach_port_t server_port;
+ kern_return_t kr =
+ bootstrap_look_up(bootstrap_port, service_name, &server_port);
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
+ return server_port;
+}
+
+mach_port_t MakeReceivingPort() {
+ mach_port_t client_port;
+ kern_return_t kr =
+ mach_port_allocate(mach_task_self(), // our task is acquiring
+ MACH_PORT_RIGHT_RECEIVE, // a new receive right
+ &client_port); // with this name
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
+ return client_port;
+}
+
+// Blocks until a mach message is sent to |server_port|. This mach message
+// must contain a mach port. Returns that mach port.
+mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
+ MachReceivePortMessage recv_msg;
+ mach_msg_header_t* recv_hdr = &(recv_msg.header);
+ recv_hdr->msgh_local_port = port_to_listen_on;
+ recv_hdr->msgh_size = sizeof(recv_msg);
+ kern_return_t kr =
+ mach_msg(recv_hdr, // message buffer
+ MACH_RCV_MSG, // option indicating service
+ 0, // send size
+ recv_hdr->msgh_size, // size of header + body
+ port_to_listen_on, // receive name
+ MACH_MSG_TIMEOUT_NONE, // no timeout, wait forever
+ MACH_PORT_NULL); // no notification port
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
+ mach_port_t other_task_port = recv_msg.data.name;
+ return other_task_port;
+}
+
+// Passes a copy of the send right of |port_to_send| to |receiving_port|.
+void SendMachPort(mach_port_t receiving_port,
+ mach_port_t port_to_send,
+ int disposition) {
+ MachSendPortMessage send_msg;
+ mach_msg_header_t* send_hdr;
+ send_hdr = &(send_msg.header);
+ send_hdr->msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
+ send_hdr->msgh_size = sizeof(send_msg);
+ send_hdr->msgh_remote_port = receiving_port;
+ send_hdr->msgh_local_port = MACH_PORT_NULL;
+ send_hdr->msgh_reserved = 0;
+ send_msg.body.msgh_descriptor_count = 1;
+ send_msg.data.name = port_to_send;
+ send_msg.data.disposition = disposition;
+ send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+ int kr = mach_msg(send_hdr, // message buffer
+ MACH_SEND_MSG, // option indicating send
+ send_hdr->msgh_size, // size of header + body
+ 0, // receive limit
+ MACH_PORT_NULL, // receive name
+ MACH_MSG_TIMEOUT_NONE, // no timeout, wait forever
+ MACH_PORT_NULL); // no notification port
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
+}
+
+std::string CreateRandomServiceName() {
+ return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
+}
+
+// Sets up the mach communication ports with the server. Returns a port to which
+// the server will send mach objects.
+mach_port_t CommonChildProcessSetUp() {
+ CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+ std::string service_name =
+ cmd_line.GetSwitchValueASCII(g_service_switch_name);
+ mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
+ mach_port_t client_port = MakeReceivingPort();
+
+ // Send the port that this process is listening on to the server.
+ SendMachPort(server_port, client_port, MACH_MSG_TYPE_MAKE_SEND);
+ return client_port;
+}
+
+// The number of active names in the current task's port name space.
+mach_msg_type_number_t GetActiveNameCount() {
+ mach_port_name_array_t name_array;
+ mach_msg_type_number_t names_count;
+ mach_port_type_array_t type_array;
+ mach_msg_type_number_t types_count;
+ kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
+ &names_count, &type_array, &types_count);
+ MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
+ return names_count;
+}
+
+} // namespace
+
+class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
+ public:
+ SharedMemoryMacMultiProcessTest() {}
+
+ CommandLine MakeCmdLine(const std::string& procname) override {
+ CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
+ // Pass the service name to the child process.
+ command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
+ return command_line;
+ }
+
+ void SetUpChild(const std::string& name) {
+ // Make a random service name so that this test doesn't conflict with other
+ // similar tests.
+ service_name_ = CreateRandomServiceName();
+ server_port_.reset(BecomeMachServer(service_name_.c_str()));
+ child_process_ = SpawnChild(name);
+ client_port_.reset(ReceiveMachPort(server_port_));
+ }
+
+ static const int s_memory_size = 99999;
+
+ protected:
+ std::string service_name_;
+
+ // A port on which the main process listens for mach messages from the child
+ // process.
+ mac::ScopedMachReceiveRight server_port_;
+
+ // A port on which the child process listens for mach messages from the main
+ // process.
+ mac::ScopedMachSendRight client_port_;
+
+ base::Process child_process_;
+ DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
+};
+
+// Tests that content written to shared memory in the server process can be read
+// by the child process.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
+ SetUpChild("MachBasedSharedMemoryClient");
+
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ SharedMemory shared_memory(shm, false);
+ shared_memory.Map(s_memory_size);
+ memset(shared_memory.memory(), 'a', s_memory_size);
+
+ // Send the underlying memory object to the client process.
+ SendMachPort(client_port_, shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+ int rv = -1;
+ ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &rv));
+ EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
+ mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+ // The next mach port should be for a memory object.
+ mach_port_t memory_object = ReceiveMachPort(client_port);
+ SharedMemoryHandle shm(memory_object,
+ SharedMemoryMacMultiProcessTest::s_memory_size,
+ GetCurrentProcId());
+ SharedMemory shared_memory(shm, false);
+ shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
+ const char* start = static_cast<const char*>(shared_memory.memory());
+ for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
+ DCHECK_EQ(start[i], 'a');
+ }
+ return 0;
+}
+
+// Tests that mapping shared memory with an offset works correctly.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
+ SetUpChild("MachBasedSharedMemoryWithOffsetClient");
+
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ SharedMemory shared_memory(shm, false);
+ shared_memory.Map(s_memory_size);
+
+ size_t page_size = SysInfo::VMAllocationGranularity();
+ char* start = static_cast<char*>(shared_memory.memory());
+ memset(start, 'a', page_size);
+ memset(start + page_size, 'b', page_size);
+ memset(start + 2 * page_size, 'c', page_size);
+
+ // Send the underlying memory object to the client process.
+ SendMachPort(client_port_, shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+ int rv = -1;
+ ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+ TestTimeouts::action_timeout(), &rv));
+ EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
+ mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+ // The next mach port should be for a memory object.
+ mach_port_t memory_object = ReceiveMachPort(client_port);
+ SharedMemoryHandle shm(memory_object,
+ SharedMemoryMacMultiProcessTest::s_memory_size,
+ GetCurrentProcId());
+ SharedMemory shared_memory(shm, false);
+ size_t page_size = SysInfo::VMAllocationGranularity();
+ shared_memory.MapAt(page_size, 2 * page_size);
+ const char* start = static_cast<const char*>(shared_memory.memory());
+ for (size_t i = 0; i < page_size; ++i) {
+ DCHECK_EQ(start[i], 'b');
+ }
+ for (size_t i = page_size; i < 2 * page_size; ++i) {
+ DCHECK_EQ(start[i], 'c');
+ }
+ return 0;
+}
+
+// Tests that duplication and closing has the right effect on Mach reference
+// counts.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ // Making a new SharedMemoryHandle increments the name count.
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
+ // make a new name.
+ shm.Duplicate();
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Closing the SharedMemoryHandle decrements the ref count. The first time has
+ // no effect.
+ shm.Close();
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Closing the SharedMemoryHandle decrements the ref count. The second time
+ // destroys the port.
+ shm.Close();
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
+// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
+// as well.
+TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
+ mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+ // Making a new SharedMemoryHandle increments the name count.
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Name count doesn't change when mapping the memory.
+ scoped_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+ shared_memory->Map(s_memory_size);
+ EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+ // Destroying the SharedMemory object frees the resource.
+ shared_memory.reset();
+ EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the read-only flag works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
+ SharedMemoryHandle shm(s_memory_size);
+ ASSERT_TRUE(shm.IsValid());
+ SharedMemory shared_memory(shm, false);
+ shared_memory.Map(s_memory_size);
+ memset(shared_memory.memory(), 'a', s_memory_size);
+
+ SharedMemoryHandle shm2 = shm.Duplicate();
+ ASSERT_TRUE(shm2.IsValid());
+ SharedMemory shared_memory2(shm, true);
+ shared_memory2.Map(s_memory_size);
+ ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+} // namespace base
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index 2e66b34cfcd..96344c6c877 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -147,7 +147,7 @@ SharedMemoryHandle SharedMemory::NULLHandle() {
// static
void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
DCHECK_GE(handle.fd, 0);
- if (close(handle.fd) < 0)
+ if (IGNORE_EINTR(close(handle.fd)) < 0)
DPLOG(ERROR) << "close";
}
@@ -177,12 +177,16 @@ bool SharedMemory::CreateAndMapAnonymous(size_t size) {
#if !defined(OS_ANDROID)
// static
-int SharedMemory::GetSizeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle) {
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle,
+ size_t* size) {
struct stat st;
if (fstat(handle.fd, &st) != 0)
- return -1;
- return st.st_size;
+ return false;
+ if (st.st_size < 0)
+ return false;
+ *size = st.st_size;
+ return true;
}
// Chromium mostly only uses the unique/private shmem as specified by
@@ -388,12 +392,12 @@ SharedMemoryHandle SharedMemory::handle() const {
void SharedMemory::Close() {
if (mapped_file_ > 0) {
- if (close(mapped_file_) < 0)
+ if (IGNORE_EINTR(close(mapped_file_)) < 0)
PLOG(ERROR) << "close";
mapped_file_ = -1;
}
if (readonly_mapped_file_ > 0) {
- if (close(readonly_mapped_file_) < 0)
+ if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
PLOG(ERROR) << "close";
readonly_mapped_file_ = -1;
}
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index c129e18d4c3..86fb3ac7547 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -16,10 +16,6 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
-#if defined(OS_MACOSX)
-#include "base/mac/scoped_nsautorelease_pool.h"
-#endif
-
#if defined(OS_POSIX)
#include <errno.h>
#include <fcntl.h>
@@ -33,15 +29,11 @@
#include "base/win/scoped_handle.h"
#endif
-static const int kNumThreads = 5;
-#if !defined(OS_IOS) && !defined(OS_ANDROID)
-static const int kNumTasks = 5;
-#endif
-
namespace base {
namespace {
+#if !defined(OS_MACOSX)
// Each thread will open the shared memory. Each thread will take a different 4
// byte int pointer, and keep changing it, with some small pauses in between.
// Verify that each thread's value in the shared memory is always correct.
@@ -57,21 +49,18 @@ class MultipleThreadMain : public PlatformThread::Delegate {
// PlatformThread::Delegate interface.
void ThreadMain() override {
-#if defined(OS_MACOSX)
- mac::ScopedNSAutoreleasePool pool;
-#endif
const uint32 kDataSize = 1024;
SharedMemory memory;
bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
EXPECT_TRUE(rv);
rv = memory.Map(kDataSize);
EXPECT_TRUE(rv);
- int *ptr = static_cast<int*>(memory.memory()) + id_;
+ int* ptr = static_cast<int*>(memory.memory()) + id_;
EXPECT_EQ(0, *ptr);
for (int idx = 0; idx < 100; idx++) {
*ptr = idx;
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
EXPECT_EQ(*ptr, idx);
}
// Reset back to 0 for the next test that uses the same name.
@@ -90,12 +79,13 @@ class MultipleThreadMain : public PlatformThread::Delegate {
const char* const MultipleThreadMain::s_test_name_ =
"SharedMemoryOpenThreadTest";
+#endif // !defined(OS_MACOSX)
} // namespace
-// Android doesn't support SharedMemory::Open/Delete/
+// Android/Mac doesn't support SharedMemory::Open/Delete/
// CreateNamedDeprecated(openExisting=true)
-#if !defined(OS_ANDROID)
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
TEST(SharedMemoryTest, OpenClose) {
const uint32 kDataSize = 1024;
std::string test_name = "SharedMemoryOpenCloseTest";
@@ -130,8 +120,8 @@ TEST(SharedMemoryTest, OpenClose) {
// Close the first memory segment, and verify the second has the right data.
memory1.Close();
- char *start_ptr = static_cast<char *>(memory2.memory());
- char *end_ptr = start_ptr + kDataSize;
+ char* start_ptr = static_cast<char*>(memory2.memory());
+ char* end_ptr = start_ptr + kDataSize;
for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
EXPECT_EQ(*ptr, '1');
@@ -169,7 +159,7 @@ TEST(SharedMemoryTest, OpenExclusive) {
// The mapped memory1 shouldn't exceed rounding for allocation granularity.
EXPECT_LT(memory1.mapped_size(),
- kDataSize + base::SysInfo::VMAllocationGranularity());
+ kDataSize + SysInfo::VMAllocationGranularity());
memset(memory1.memory(), 'G', kDataSize);
@@ -194,11 +184,11 @@ TEST(SharedMemoryTest, OpenExclusive) {
// The mapped memory2 shouldn't exceed rounding for allocation granularity.
EXPECT_LT(memory2.mapped_size(),
- kDataSize2 + base::SysInfo::VMAllocationGranularity());
+ kDataSize2 + SysInfo::VMAllocationGranularity());
// Verify that opening memory2 didn't truncate or delete memory 1.
- char *start_ptr = static_cast<char *>(memory2.memory());
- char *end_ptr = start_ptr + kDataSize;
+ char* start_ptr = static_cast<char*>(memory2.memory());
+ char* end_ptr = start_ptr + kDataSize;
for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
EXPECT_EQ(*ptr, 'G');
}
@@ -209,7 +199,7 @@ TEST(SharedMemoryTest, OpenExclusive) {
rv = memory1.Delete(test_name);
EXPECT_TRUE(rv);
}
-#endif
+#endif // !defined(OS_ANDROID) && !defined(OS_MACOSX)
// Check that memory is still mapped after its closed.
TEST(SharedMemoryTest, CloseNoUnmap) {
@@ -234,9 +224,12 @@ TEST(SharedMemoryTest, CloseNoUnmap) {
EXPECT_EQ(nullptr, memory.memory());
}
+#if !defined(OS_MACOSX)
// Create a set of N threads to each open a shared memory segment and write to
// it. Verify that they are always reading/writing consistent data.
TEST(SharedMemoryTest, MultipleThreads) {
+ const int kNumThreads = 5;
+
MultipleThreadMain::CleanUp();
// On POSIX we have a problem when 2 threads try to create the shmem
// (a file) at exactly the same time, since create both creates the
@@ -270,6 +263,7 @@ TEST(SharedMemoryTest, MultipleThreads) {
}
MultipleThreadMain::CleanUp();
}
+#endif
// Allocate private (unique) shared memory with an empty string for a
// name. Make sure several of them don't point to the same thing as
@@ -288,7 +282,7 @@ TEST(SharedMemoryTest, AnonymousPrivate) {
for (i = 0; i < count; i++) {
rv = memories[i].CreateAndMapAnonymous(kDataSize);
EXPECT_TRUE(rv);
- int *ptr = static_cast<int*>(memories[i].memory());
+ int* ptr = static_cast<int*>(memories[i].memory());
EXPECT_TRUE(ptr);
pointers[i] = ptr;
}
@@ -375,32 +369,24 @@ TEST(SharedMemoryTest, ShareReadOnly) {
EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
#elif defined(OS_WIN)
- EXPECT_EQ(NULL, MapViewOfFile(handle, FILE_MAP_WRITE, 0, 0, 0))
+ EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
<< "Shouldn't be able to map memory writable.";
HANDLE temp_handle;
- BOOL rv = ::DuplicateHandle(GetCurrentProcess(),
- handle,
- GetCurrentProcess(),
- &temp_handle,
- FILE_MAP_ALL_ACCESS,
- false,
- 0);
+ BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+ GetCurrentProcess(), &temp_handle,
+ FILE_MAP_ALL_ACCESS, false, 0);
EXPECT_EQ(FALSE, rv)
<< "Shouldn't be able to duplicate the handle into a writable one.";
if (rv)
- base::win::ScopedHandle writable_handle(temp_handle);
- rv = ::DuplicateHandle(GetCurrentProcess(),
- handle,
- GetCurrentProcess(),
- &temp_handle,
- FILE_MAP_READ,
- false,
- 0);
+ win::ScopedHandle writable_handle(temp_handle);
+ rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+ GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
+ false, 0);
EXPECT_EQ(TRUE, rv)
<< "Should be able to duplicate the handle into a readable one.";
if (rv)
- base::win::ScopedHandle writable_handle(temp_handle);
+ win::ScopedHandle writable_handle(temp_handle);
#else
#error Unexpected platform; write a test that tries to make 'handle' writable.
#endif // defined(OS_POSIX) || defined(OS_WIN)
@@ -423,6 +409,7 @@ TEST(SharedMemoryTest, ShareToSelf) {
contents,
StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
+ shared_handle = SharedMemoryHandle();
ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
SharedMemory readonly(shared_handle, /*readonly=*/true);
@@ -532,25 +519,20 @@ TEST(SharedMemoryTest, FilePermissionsAnonymous) {
// Create a shared memory object, check its permissions.
TEST(SharedMemoryTest, FilePermissionsNamed) {
- const uint32 kTestSize = 1 << 8;
+ const uint32_t kTestSize = 1 << 8;
SharedMemory shared_memory;
SharedMemoryCreateOptions options;
options.size = kTestSize;
- std::string shared_mem_name = "shared_perm_test-" + IntToString(getpid()) +
- "-" + Uint64ToString(RandUint64());
- options.name_deprecated = &shared_mem_name;
+
// Set a file mode creation mask that gives all permissions.
ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
EXPECT_TRUE(shared_memory.Create(options));
- // Clean-up the backing file name immediately, we don't need it.
- EXPECT_TRUE(shared_memory.Delete(shared_mem_name));
- int shm_fd =
- SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+ int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
struct stat shm_stat;
- EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
+ EXPECT_EQ(0, fstat(fd, &shm_stat));
// Neither the group, nor others should have been able to open the shared
// memory file while its name existed.
EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
@@ -574,14 +556,13 @@ TEST(SharedMemoryTest, MapMinimumAlignment) {
}
// iOS does not allow multiple processes.
-// Android ashmem doesn't support named shared memory.
-#if !defined(OS_IOS) && !defined(OS_ANDROID)
-
+// Android ashmem does not support named shared memory.
+// Mac SharedMemory does not support named shared memory. crbug.com/345734
+#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
// On POSIX it is especially important we test shmem across processes,
// not just across threads. But the test is enabled on all platforms.
class SharedMemoryProcessTest : public MultiProcessTest {
public:
-
static void CleanUp() {
SharedMemory memory;
memory.Delete(s_test_name_);
@@ -589,9 +570,6 @@ class SharedMemoryProcessTest : public MultiProcessTest {
static int TaskTestMain() {
int errors = 0;
-#if defined(OS_MACOSX)
- mac::ScopedNSAutoreleasePool pool;
-#endif
SharedMemory memory;
bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
EXPECT_TRUE(rv);
@@ -601,10 +579,10 @@ class SharedMemoryProcessTest : public MultiProcessTest {
EXPECT_TRUE(rv);
if (rv != true)
errors++;
- int *ptr = static_cast<int*>(memory.memory());
+ int* ptr = static_cast<int*>(memory.memory());
// This runs concurrently in multiple processes. Writes need to be atomic.
- base::subtle::Barrier_AtomicIncrement(ptr, 1);
+ subtle::Barrier_AtomicIncrement(ptr, 1);
memory.Close();
return errors;
}
@@ -617,6 +595,8 @@ const char* const SharedMemoryProcessTest::s_test_name_ = "MPMem";
const uint32 SharedMemoryProcessTest::s_data_size_ = 1024;
TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
+ const int kNumTasks = 5;
+
SharedMemoryProcessTest::CleanUp();
// Create a shared memory region. Set the first word to 0.
@@ -653,7 +633,6 @@ TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
return SharedMemoryProcessTest::TaskTestMain();
}
-
-#endif // !defined(OS_IOS) && !defined(OS_ANDROID)
+#endif // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
} // namespace base
diff --git a/chromium/base/memory/shared_memory_win.cc b/chromium/base/memory/shared_memory_win.cc
index 5f706fe6485..3eef9a94d33 100644
--- a/chromium/base/memory/shared_memory_win.cc
+++ b/chromium/base/memory/shared_memory_win.cc
@@ -45,11 +45,12 @@ SharedMemory::SharedMemory(const std::wstring& name)
}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : mapped_file_(handle),
+ : mapped_file_(handle.GetHandle()),
mapped_size_(0),
memory_(NULL),
read_only_(read_only),
requested_size_(0) {
+ DCHECK(!handle.IsValid() || handle.BelongsToCurrentProcess());
}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle,
@@ -60,11 +61,9 @@ SharedMemory::SharedMemory(const SharedMemoryHandle& handle,
memory_(NULL),
read_only_(read_only),
requested_size_(0) {
- ::DuplicateHandle(process, handle,
- GetCurrentProcess(), &mapped_file_,
- read_only_ ? FILE_MAP_READ : FILE_MAP_READ |
- FILE_MAP_WRITE,
- FALSE, 0);
+ ::DuplicateHandle(
+ process, handle.GetHandle(), GetCurrentProcess(), &mapped_file_,
+ read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE, FALSE, 0);
}
SharedMemory::~SharedMemory() {
@@ -74,18 +73,17 @@ SharedMemory::~SharedMemory() {
// static
bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
- return handle != NULL;
+ return handle.IsValid();
}
// static
SharedMemoryHandle SharedMemory::NULLHandle() {
- return NULL;
+ return SharedMemoryHandle();
}
// static
void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- DCHECK(handle != NULL);
- ::CloseHandle(handle);
+ handle.Close();
}
// static
@@ -98,13 +96,15 @@ size_t SharedMemory::GetHandleLimit() {
// static
SharedMemoryHandle SharedMemory::DuplicateHandle(
const SharedMemoryHandle& handle) {
+ DCHECK(handle.BelongsToCurrentProcess());
+ HANDLE duped_handle;
ProcessHandle process = GetCurrentProcess();
- SharedMemoryHandle duped_handle;
- BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle, 0,
- FALSE, DUPLICATE_SAME_ACCESS);
+ BOOL success =
+ ::DuplicateHandle(process, handle.GetHandle(), process, &duped_handle, 0,
+ FALSE, DUPLICATE_SAME_ACCESS);
if (success)
- return duped_handle;
- return NULLHandle();
+ return SharedMemoryHandle(duped_handle, GetCurrentProcId());
+ return SharedMemoryHandle();
}
bool SharedMemory::CreateAndMapAnonymous(size_t size) {
@@ -230,7 +230,7 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
SharedMemoryHandle* new_handle,
bool close_self,
ShareMode share_mode) {
- *new_handle = 0;
+ *new_handle = SharedMemoryHandle();
DWORD access = FILE_MAP_READ;
DWORD options = 0;
HANDLE mapped_file = mapped_file_;
@@ -245,7 +245,7 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
}
if (process == GetCurrentProcess() && close_self) {
- *new_handle = mapped_file;
+ *new_handle = SharedMemoryHandle(mapped_file, base::GetCurrentProcId());
return true;
}
@@ -253,20 +253,20 @@ bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
access, FALSE, options)) {
return false;
}
- *new_handle = result;
+ *new_handle = SharedMemoryHandle(result, base::GetProcId(process));
return true;
}
void SharedMemory::Close() {
if (mapped_file_ != NULL) {
- CloseHandle(mapped_file_);
+ ::CloseHandle(mapped_file_);
mapped_file_ = NULL;
}
}
SharedMemoryHandle SharedMemory::handle() const {
- return mapped_file_;
+ return SharedMemoryHandle(mapped_file_, base::GetCurrentProcId());
}
} // namespace base
diff --git a/chromium/base/memory/singleton.h b/chromium/base/memory/singleton.h
index e50bdc05f34..73196990022 100644
--- a/chromium/base/memory/singleton.h
+++ b/chromium/base/memory/singleton.h
@@ -36,10 +36,10 @@ static const subtle::AtomicWord kBeingCreatedMarker = 1;
// we can implement the more complicated pieces out of line in the .cc file.
BASE_EXPORT subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance);
+class DeleteTraceLogForTesting;
+
} // namespace internal
-} // namespace base
-// TODO(joth): Move more of this file into namespace base
// Default traits for Singleton<Type>. Calls operator new and operator delete on
// the object. Registers automatic deletion at process exit.
@@ -110,7 +110,7 @@ struct StaticMemorySingletonTraits {
// this is traits for returning NULL.
static Type* New() {
// Only constructs once and returns pointer; otherwise returns NULL.
- if (base::subtle::NoBarrier_AtomicExchange(&dead_, 1))
+ if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
return NULL;
return new(buffer_.void_data()) Type();
@@ -125,20 +125,19 @@ struct StaticMemorySingletonTraits {
static const bool kAllowedToAccessOnNonjoinableThread = true;
// Exposed for unittesting.
- static void Resurrect() {
- base::subtle::NoBarrier_Store(&dead_, 0);
- }
+ static void Resurrect() { subtle::NoBarrier_Store(&dead_, 0); }
private:
- static base::AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
+ static AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
// Signal the object was already deleted, so it is not revived.
- static base::subtle::Atomic32 dead_;
+ static subtle::Atomic32 dead_;
};
-template <typename Type> base::AlignedMemory<sizeof(Type), ALIGNOF(Type)>
+template <typename Type>
+AlignedMemory<sizeof(Type), ALIGNOF(Type)>
StaticMemorySingletonTraits<Type>::buffer_;
-template <typename Type> base::subtle::Atomic32
- StaticMemorySingletonTraits<Type>::dead_ = 0;
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// The Singleton<Type, Traits, DifferentiatingType> class manages a single
// instance of Type which will be created on first use and will be destroyed at
@@ -190,7 +189,7 @@ template <typename Type> base::subtle::Atomic32
// RAE = kRegisterAtExit
//
// On every platform, if Traits::RAE is true, the singleton will be destroyed at
-// process exit. More precisely it uses base::AtExitManager which requires an
+// process exit. More precisely it uses AtExitManager which requires an
// object of this type to be instantiated. AtExitManager mimics the semantics
// of atexit() such as LIFO order but under Windows is safer to call. For more
// information see at_exit.h.
@@ -209,6 +208,7 @@ template <typename Type> base::subtle::Atomic32
// (b) Your factory function must never throw an exception. This class is not
// exception-safe.
//
+
template <typename Type,
typename Traits = DefaultSingletonTraits<Type>,
typename DifferentiatingType = Type>
@@ -219,7 +219,7 @@ class Singleton {
friend Type* Type::GetInstance();
// Allow TraceLog tests to test tracing after OnExit.
- friend class DeleteTraceLogForTesting;
+ friend class internal::DeleteTraceLogForTesting;
// This class is safe to be constructed and copy-constructed since it has no
// member.
@@ -229,36 +229,36 @@ class Singleton {
#ifndef NDEBUG
// Avoid making TLS lookup on release builds.
if (!Traits::kAllowedToAccessOnNonjoinableThread)
- base::ThreadRestrictions::AssertSingletonAllowed();
+ ThreadRestrictions::AssertSingletonAllowed();
#endif
// The load has acquire memory ordering as the thread which reads the
// instance_ pointer must acquire visibility over the singleton data.
- base::subtle::AtomicWord value = base::subtle::Acquire_Load(&instance_);
- if (value != 0 && value != base::internal::kBeingCreatedMarker) {
+ subtle::AtomicWord value = subtle::Acquire_Load(&instance_);
+ if (value != 0 && value != internal::kBeingCreatedMarker) {
return reinterpret_cast<Type*>(value);
}
// Object isn't created yet, maybe we will get to create it, let's try...
- if (base::subtle::Acquire_CompareAndSwap(
- &instance_, 0, base::internal::kBeingCreatedMarker) == 0) {
+ if (subtle::Acquire_CompareAndSwap(&instance_, 0,
+ internal::kBeingCreatedMarker) == 0) {
// instance_ was NULL and is now kBeingCreatedMarker. Only one thread
// will ever get here. Threads might be spinning on us, and they will
// stop right after we do this store.
Type* newval = Traits::New();
// Releases the visibility over instance_ to the readers.
- base::subtle::Release_Store(
- &instance_, reinterpret_cast<base::subtle::AtomicWord>(newval));
+ subtle::Release_Store(&instance_,
+ reinterpret_cast<subtle::AtomicWord>(newval));
if (newval != NULL && Traits::kRegisterAtExit)
- base::AtExitManager::RegisterCallback(OnExit, NULL);
+ AtExitManager::RegisterCallback(OnExit, NULL);
return newval;
}
// We hit a race. Wait for the other thread to complete it.
- value = base::internal::WaitForInstance(&instance_);
+ value = internal::WaitForInstance(&instance_);
return reinterpret_cast<Type*>(value);
}
@@ -269,15 +269,15 @@ class Singleton {
static void OnExit(void* /*unused*/) {
// AtExit should only ever be register after the singleton instance was
// created. We should only ever get here with a valid instance_ pointer.
- Traits::Delete(
- reinterpret_cast<Type*>(base::subtle::NoBarrier_Load(&instance_)));
+ Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
instance_ = 0;
}
- static base::subtle::AtomicWord instance_;
+ static subtle::AtomicWord instance_;
};
template <typename Type, typename Traits, typename DifferentiatingType>
-base::subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::
- instance_ = 0;
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+} // namespace base
#endif // BASE_MEMORY_SINGLETON_H_
diff --git a/chromium/base/memory/singleton_unittest.cc b/chromium/base/memory/singleton_unittest.cc
index dbff007ada7..e8788babdfd 100644
--- a/chromium/base/memory/singleton_unittest.cc
+++ b/chromium/base/memory/singleton_unittest.cc
@@ -6,6 +6,7 @@
#include "base/memory/singleton.h"
#include "testing/gtest/include/gtest/gtest.h"
+namespace base {
namespace {
COMPILE_ASSERT(DefaultSingletonTraits<int>::kRegisterAtExit == true, a);
@@ -115,7 +116,7 @@ class AlignedTestSingleton {
~AlignedTestSingleton() {}
static AlignedTestSingleton* GetInstance() {
return Singleton<AlignedTestSingleton,
- StaticMemorySingletonTraits<AlignedTestSingleton> >::get();
+ StaticMemorySingletonTraits<AlignedTestSingleton>>::get();
}
Type type_;
@@ -147,7 +148,6 @@ CallbackFunc* GetStaticSingleton() {
return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
}
-} // namespace
class SingletonTest : public testing::Test {
public:
@@ -207,7 +207,7 @@ TEST_F(SingletonTest, Basic) {
CallbackFunc* static_singleton;
{
- base::ShadowingAtExitManager sem;
+ ShadowingAtExitManager sem;
{
singleton_int = SingletonInt();
}
@@ -241,7 +241,7 @@ TEST_F(SingletonTest, Basic) {
EXPECT_EQ(NULL, GetStaticSingleton());
{
- base::ShadowingAtExitManager sem;
+ ShadowingAtExitManager sem;
// Verifiy that the variables were reset.
{
singleton_int = SingletonInt();
@@ -285,3 +285,6 @@ TEST_F(SingletonTest, Alignment) {
EXPECT_ALIGNED(align128, 128);
EXPECT_ALIGNED(align4096, 4096);
}
+
+} // namespace
+} // namespace base
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index 8a433922fdf..1230ead14bf 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -16,6 +16,7 @@
//
// class Controller {
// public:
+// Controller() : weak_factory_(this) {}
// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
// void WorkComplete(const Result& result) { ... }
// private:
diff --git a/chromium/base/message_loop/incoming_task_queue.cc b/chromium/base/message_loop/incoming_task_queue.cc
index 5e9a4613da1..eab14e9b707 100644
--- a/chromium/base/message_loop/incoming_task_queue.cc
+++ b/chromium/base/message_loop/incoming_task_queue.cc
@@ -147,7 +147,7 @@ bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
}
// Initialize the sequence number. The sequence number is used for delayed
- // tasks (to faciliate FIFO sorting when two tasks have the same
+ // tasks (to facilitate FIFO sorting when two tasks have the same
// delayed_run_time value) and for identifying the task in about:tracing.
pending_task->sequence_num = next_sequence_num_++;
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 4fecbc5a603..a44f46863e4 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -19,6 +19,7 @@
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "base/tracked_objects.h"
#if defined(OS_MACOSX)
@@ -43,7 +44,7 @@ namespace {
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
LAZY_INSTANCE_INITIALIZER;
-// Logical events for Histogram profiling. Run with -message-loop-histogrammer
+// Logical events for Histogram profiling. Run with --message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
@@ -55,9 +56,9 @@ const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;
// Provide a macro that takes an expression (such as a constant, or macro
-// constant) and creates a pair to initalize an array of pairs. In this case,
+// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expressions value, and the "stringized" version
-// of the expression (i.e., the exrpression put in quotes). For example, if
+// of the expression (i.e., the expression put in quotes). For example, if
// we have:
// #define FOO 2
// #define BAR 5
@@ -78,7 +79,7 @@ const LinearHistogram::DescriptionPair event_descriptions_[] = {
VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
- {-1, NULL} // The list must be null terminated, per API to histogram.
+ {-1, NULL} // The list must be null-terminated, per API to histogram.
};
#endif // !defined(OS_NACL)
@@ -361,7 +362,7 @@ bool MessageLoop::HasHighResolutionTasks() {
}
bool MessageLoop::IsIdleForTesting() {
- // We only check the imcoming queue|, since we don't want to lock the work
+ // We only check the incoming queue, since we don't want to lock the work
// queue.
return incoming_task_queue_->IsIdleForTesting();
}
@@ -407,7 +408,8 @@ void MessageLoop::BindToCurrentThread() {
incoming_task_queue_->StartScheduling();
unbound_task_runner_->BindToCurrentThread();
- SetTaskRunner(unbound_task_runner_.Pass());
+ unbound_task_runner_ = nullptr;
+ SetThreadTaskRunnerHandle();
}
void MessageLoop::SetTaskRunner(
@@ -416,7 +418,12 @@ void MessageLoop::SetTaskRunner(
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(!unbound_task_runner_);
task_runner_ = task_runner.Pass();
- // Clear the previous thread task runner first because only one can exist at
+ SetThreadTaskRunnerHandle();
+}
+
+void MessageLoop::SetThreadTaskRunnerHandle() {
+ DCHECK_EQ(this, current());
+ // Clear the previous thread task runner first, because only one can exist at
// a time.
thread_task_runner_handle_.reset();
thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
@@ -467,10 +474,11 @@ void MessageLoop::RunTask(const PendingTask& pending_task) {
HistogramEvent(kTaskRunEvent);
+ TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);
+
FOR_EACH_OBSERVER(TaskObserver, task_observers_,
WillProcessTask(pending_task));
- task_annotator_.RunTask(
- "MessageLoop::PostTask", "MessageLoop::RunTask", pending_task);
+ task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
FOR_EACH_OBSERVER(TaskObserver, task_observers_,
DidProcessTask(pending_task));
@@ -557,7 +565,7 @@ void MessageLoop::StartHistogrammer() {
"MsgLoop:" + thread_name_,
kLeastNonZeroMessageId, kMaxMessageId,
kNumberOfDistinctMessagesDisplayed,
- message_histogram_->kHexRangePrintingFlag,
+ HistogramBase::kHexRangePrintingFlag,
event_descriptions_);
}
#endif
@@ -607,7 +615,7 @@ bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
return false;
}
- // When we "fall behind," there will be a lot of tasks in the delayed work
+ // When we "fall behind", there will be a lot of tasks in the delayed work
// queue that are ready to run. To increase efficiency when we fall behind,
// we will only call Time::Now() intermittently, and then process all tasks
// that are ready to run before calling it again. As a result, the more we
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index 3445a77aa59..63a29f15e95 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -308,18 +308,18 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
// Enables or disables the recursive task processing. This happens in the case
- // of recursive message loops. Some unwanted message loop may occurs when
+ // of recursive message loops. Some unwanted message loops may occur when
// using common controls or printer functions. By default, recursive task
// processing is disabled.
//
- // Please utilize |ScopedNestableTaskAllower| instead of calling these methods
- // directly. In general nestable message loops are to be avoided. They are
+ // Please use |ScopedNestableTaskAllower| instead of calling these methods
+ // directly. In general, nestable message loops are to be avoided. They are
// dangerous and difficult to get right, so please use with extreme caution.
//
// The specific case where tasks get queued is:
// - The thread is running a message loop.
- // - It receives a task #1 and execute it.
- // - The task #1 implicitly start a message loop, like a MessageBox in the
+ // - It receives a task #1 and executes it.
+ // - The task #1 implicitly starts a message loop, like a MessageBox in the
// unit test. This can also be StartDoc or GetSaveFileName.
// - The thread receives a task #2 before or while in this second message
// loop.
@@ -420,7 +420,7 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// and then pass it to the thread where the message loop actually runs.
// The message loop's BindToCurrentThread() method must be called on the
// thread the message loop runs on, before calling Run().
- // Before BindToCurrentThread() is called only Post*Task() functions can
+ // Before BindToCurrentThread() is called, only Post*Task() functions can
// be called on the message loop.
static scoped_ptr<MessageLoop> CreateUnbound(
Type type,
@@ -433,6 +433,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// Configure various members and bind this message loop to the current thread.
void BindToCurrentThread();
+ // Sets the ThreadTaskRunnerHandle for the current thread to point to the
+ // task runner for this message loop.
+ void SetThreadTaskRunnerHandle();
+
// Invokes the actual run loop using the message pump.
void RunHandler();
@@ -507,7 +511,7 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
bool nestable_tasks_allowed_;
#if defined(OS_WIN)
- // Should be set to true before calling Windows APIs like TrackPopupMenu, etc
+ // Should be set to true before calling Windows APIs like TrackPopupMenu, etc.
// which enter a modal message loop.
bool os_modal_loop_;
#endif
diff --git a/chromium/base/message_loop/message_loop_task_runner_unittest.cc b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
index caf88afadd0..a0d84b7edb3 100644
--- a/chromium/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/chromium/base/message_loop/message_loop_task_runner_unittest.cc
@@ -30,7 +30,7 @@ class MessageLoopTaskRunnerTest : public testing::Test {
protected:
void SetUp() override {
// Use SetUp() instead of the constructor to avoid posting a task to a
- // partialy constructed object.
+ // partially constructed object.
task_thread_.Start();
// Allow us to pause the |task_thread_|'s MessageLoop.
@@ -41,7 +41,7 @@ class MessageLoopTaskRunnerTest : public testing::Test {
void TearDown() override {
// Make sure the |task_thread_| is not blocked, and stop the thread
- // fully before destuction because its tasks may still depend on the
+ // fully before destruction because its tasks may still depend on the
// |thread_sync_| event.
thread_sync_.Signal();
task_thread_.Stop();
@@ -244,7 +244,7 @@ TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
// it, we cannot just delete it because PostTaskAndReplyRelay's destructor
// checks that MessageLoop::current() is the the same as when the
// PostTaskAndReplyRelay object was constructed. However, this loop must have
- // aleady been deleted in order to perform this test. See
+ // already been deleted in order to perform this test. See
// http://crbug.com/86301.
}
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index 9c170175688..89f9a40a5ef 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -119,7 +119,7 @@ void RunTest_PostDelayedTask_SharedTimer_SubPump() {
Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
TimeDelta::FromSeconds(1000));
- // This slightly delayed task should run from within SubPumpFunc).
+ // This slightly delayed task should run from within SubPumpFunc.
loop.PostDelayedTask(
FROM_HERE,
Bind(&PostQuitMessage, 0),
@@ -600,7 +600,7 @@ void RunTest_WaitForIO() {
TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
thread_loop->PostTask(FROM_HERE, Bind(&TestIOHandler::Init,
Unretained(&handler1)));
- // TODO(ajwong): Do we really need such long Sleeps in ths function?
+ // TODO(ajwong): Do we really need such long Sleeps in this function?
// Make sure the thread runs and sleeps for lack of work.
TimeDelta delay = TimeDelta::FromMilliseconds(100);
PlatformThread::Sleep(delay);
@@ -645,7 +645,7 @@ TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
RunTest_PostDelayedTask_SharedTimer_SubPump();
}
-// This test occasionally hangs http://crbug.com/44567
+// This test occasionally hangs. See http://crbug.com/44567.
TEST(MessageLoopTest, DISABLED_RecursiveDenial2) {
RunTest_RecursiveDenial2(MessageLoop::TYPE_DEFAULT);
RunTest_RecursiveDenial2(MessageLoop::TYPE_UI);
@@ -653,7 +653,7 @@ TEST(MessageLoopTest, DISABLED_RecursiveDenial2) {
}
TEST(MessageLoopTest, RecursiveSupport2) {
- // This test requires a UI loop
+ // This test requires a UI loop.
RunTest_RecursiveSupport2(MessageLoop::TYPE_UI);
}
#endif // defined(OS_WIN)
diff --git a/chromium/base/message_loop/message_pump_android.cc b/chromium/base/message_loop/message_pump_android.cc
index babd17b577e..a0eee12c85c 100644
--- a/chromium/base/message_loop/message_pump_android.cc
+++ b/chromium/base/message_loop/message_pump_android.cc
@@ -21,8 +21,10 @@ using base::android::ScopedJavaLocalRef;
// ----------------------------------------------------------------------------
// This method can not move to anonymous namespace as it has been declared as
// 'static' in system_message_handler_jni.h.
-static void DoRunLoopOnce(JNIEnv* env, jobject obj, jlong native_delegate,
- jlong delayed_scheduled_time_ticks) {
+static void DoRunLoopOnce(JNIEnv* env,
+ const JavaParamRef<jobject>& obj,
+ jlong native_delegate,
+ jlong delayed_scheduled_time_ticks) {
base::MessagePump::Delegate* delegate =
reinterpret_cast<base::MessagePump::Delegate*>(native_delegate);
DCHECK(delegate);
diff --git a/chromium/base/message_loop/message_pump_glib.cc b/chromium/base/message_loop/message_pump_glib.cc
index f06f60d8cf5..fd23745f4e1 100644
--- a/chromium/base/message_loop/message_pump_glib.cc
+++ b/chromium/base/message_loop/message_pump_glib.cc
@@ -52,7 +52,7 @@ int GetTimeIntervalMilliseconds(const TimeTicks& from) {
// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
// (i.e., you can call Run from them), but Prepare and Check cannot.
// Finalize is called when the source is destroyed.
-// NOTE: It is common for subsytems to want to process pending events while
+// NOTE: It is common for subsystems to want to process pending events while
// doing intensive work, for example the flash plugin. They usually use the
// following pattern (recommended by the GTK docs):
// while (gtk_events_pending()) {
@@ -350,7 +350,7 @@ void MessagePumpGlib::ScheduleWork() {
void MessagePumpGlib::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
// We need to wake up the loop in case the poll timeout needs to be
- // adjusted. This will cause us to try to do work, but that's ok.
+ // adjusted. This will cause us to try to do work, but that's OK.
delayed_work_time_ = delayed_work_time;
ScheduleWork();
}
diff --git a/chromium/base/message_loop/message_pump_libevent.cc b/chromium/base/message_loop/message_pump_libevent.cc
index b5b1fb7e81e..74602a7b320 100644
--- a/chromium/base/message_loop/message_pump_libevent.cc
+++ b/chromium/base/message_loop/message_pump_libevent.cc
@@ -55,13 +55,17 @@ MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher()
: event_(NULL),
pump_(NULL),
watcher_(NULL),
- weak_factory_(this) {
+ was_destroyed_(NULL) {
}
MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
if (event_) {
StopWatchingFileDescriptor();
}
+ if (was_destroyed_) {
+ DCHECK(!*was_destroyed_);
+ *was_destroyed_ = true;
+ }
}
bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
@@ -340,23 +344,31 @@ bool MessagePumpLibevent::Init() {
}
// static
-void MessagePumpLibevent::OnLibeventNotification(int fd, short flags,
+void MessagePumpLibevent::OnLibeventNotification(int fd,
+ short flags,
void* context) {
- WeakPtr<FileDescriptorWatcher> controller =
- static_cast<FileDescriptorWatcher*>(context)->weak_factory_.GetWeakPtr();
- DCHECK(controller.get());
+ FileDescriptorWatcher* controller =
+ static_cast<FileDescriptorWatcher*>(context);
+ DCHECK(controller);
TRACE_EVENT1("toplevel", "MessagePumpLibevent::OnLibeventNotification",
"fd", fd);
MessagePumpLibevent* pump = controller->pump();
pump->processed_io_events_ = true;
- if (flags & EV_WRITE) {
+ if ((flags & (EV_READ | EV_WRITE)) == (EV_READ | EV_WRITE)) {
+ // Both callbacks will be called. It is necessary to check that |controller|
+ // is not destroyed.
+ bool controller_was_destroyed = false;
+ controller->was_destroyed_ = &controller_was_destroyed;
controller->OnFileCanWriteWithoutBlocking(fd, pump);
- }
- // Check |controller| in case it's been deleted in
- // controller->OnFileCanWriteWithoutBlocking().
- if (controller.get() && flags & EV_READ) {
+ if (!controller_was_destroyed)
+ controller->OnFileCanReadWithoutBlocking(fd, pump);
+ if (!controller_was_destroyed)
+ controller->was_destroyed_ = nullptr;
+ } else if (flags & EV_WRITE) {
+ controller->OnFileCanWriteWithoutBlocking(fd, pump);
+ } else if (flags & EV_READ) {
controller->OnFileCanReadWithoutBlocking(fd, pump);
}
}
diff --git a/chromium/base/message_loop/message_pump_libevent.h b/chromium/base/message_loop/message_pump_libevent.h
index 3f5ad51dae9..8b815aea588 100644
--- a/chromium/base/message_loop/message_pump_libevent.h
+++ b/chromium/base/message_loop/message_pump_libevent.h
@@ -7,7 +7,6 @@
#include "base/basictypes.h"
#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_pump.h"
#include "base/observer_list.h"
#include "base/threading/thread_checker.h"
@@ -86,7 +85,9 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
event* event_;
MessagePumpLibevent* pump_;
Watcher* watcher_;
- WeakPtrFactory<FileDescriptorWatcher> weak_factory_;
+ // If this pointer is non-NULL, the pointee is set to true in the
+ // destructor.
+ bool* was_destroyed_;
DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
};
diff --git a/chromium/base/message_loop/message_pump_mac.h b/chromium/base/message_loop/message_pump_mac.h
index c8532020847..c46f2612666 100644
--- a/chromium/base/message_loop/message_pump_mac.h
+++ b/chromium/base/message_loop/message_pump_mac.h
@@ -22,7 +22,7 @@
//
// Typically, MessagePumpNSApplication only makes sense on a Cocoa
// application's main thread. If a CFRunLoop-based message pump is needed on
-// any other thread, one of the other concrete subclasses is preferrable.
+// any other thread, one of the other concrete subclasses is preferable.
// MessagePumpMac::Create is defined, which returns a new NSApplication-based
// or NSRunLoop-based MessagePump subclass depending on which thread it is
// called on.
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index 7d833befa61..9f76064ce38 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -70,6 +70,13 @@ class ScheduleWorkTest : public testing::Test {
{
target_.reset(new Thread("target"));
target_->StartWithOptions(Thread::Options(target_type, 0u));
+
+ // Without this, it's possible for the scheduling threads to start and run
+ // before the target thread. In this case, the scheduling threads will
+ // call target_message_loop()->ScheduleWork(), which dereferences the
+ // loop's message pump, which is only created after the target thread has
+ // finished starting.
+ target_->WaitUntilThreadStarted();
}
ScopedVector<Thread> scheduling_threads;
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index cdbf0c260a9..14e432015e0 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -10,7 +10,6 @@
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/process/memory.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/win/wrapped_window_proc.h"
@@ -132,11 +131,6 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
// static
LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
- // TODO(vadimt): Remove ScopedTracker below once crbug.com/440919 is fixed.
- tracked_objects::ScopedTracker tracking_profile1(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "440919 MessagePumpForUI::WndProcThunk1"));
-
switch (message) {
case kMsgHaveWork:
reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
@@ -145,12 +139,6 @@ LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
break;
}
-
- // TODO(vadimt): Remove ScopedTracker below once crbug.com/440919 is fixed.
- tracked_objects::ScopedTracker tracking_profile2(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "440919 MessagePumpForUI::WndProcThunk2"));
-
return DefWindowProc(hwnd, message, wparam, lparam);
}
@@ -319,7 +307,7 @@ void MessagePumpForUI::RescheduleTimer() {
//
// We use a single SetTimer corresponding to the timer that will expire
// soonest. As new timers are created and destroyed, we update SetTimer.
- // Getting a spurrious SetTimer event firing is benign, as we'll just be
+ // Getting a spurious SetTimer event firing is benign, as we'll just be
// processing an empty timer queue.
//
int delay_msec = GetCurrentDelay();
@@ -345,11 +333,6 @@ void MessagePumpForUI::RescheduleTimer() {
}
bool MessagePumpForUI::ProcessNextWindowsMessage() {
- // TODO(vadimt): Remove ScopedTracker below once crbug.com/440919 is fixed.
- tracked_objects::ScopedTracker tracking_profile1(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "440919 MessagePumpForUI::ProcessNextWindowsMessage1"));
-
// If there are sent messages in the queue then PeekMessage internally
// dispatches the message and returns false. We return true in this
// case to ensure that the message loop peeks again instead of calling
@@ -359,11 +342,6 @@ bool MessagePumpForUI::ProcessNextWindowsMessage() {
if (HIWORD(queue_status) & QS_SENDMESSAGE)
sent_messages_in_queue = true;
- // TODO(vadimt): Remove ScopedTracker below once crbug.com/440919 is fixed.
- tracked_objects::ScopedTracker tracking_profile2(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "440919 MessagePumpForUI::ProcessNextWindowsMessage2"));
-
MSG msg;
if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE) != FALSE)
return ProcessMessageHelper(msg);
@@ -390,14 +368,8 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
return true;
uint32_t action = MessagePumpDispatcher::POST_DISPATCH_PERFORM_DEFAULT;
- if (state_->dispatcher) {
- // TODO(vadimt): Remove ScopedTracker below once crbug.com/440919 is fixed.
- tracked_objects::ScopedTracker tracking_profile4(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "440919 MessagePumpForUI::ProcessMessageHelper4"));
-
+ if (state_->dispatcher)
action = state_->dispatcher->Dispatch(msg);
- }
if (action & MessagePumpDispatcher::POST_DISPATCH_QUIT_LOOP)
state_->should_quit = true;
if (action & MessagePumpDispatcher::POST_DISPATCH_PERFORM_DEFAULT) {
@@ -614,8 +586,8 @@ bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
}
bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
- if (this == reinterpret_cast<MessagePumpForIO*>(item.context) &&
- this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
+ if (reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.context) &&
+ reinterpret_cast<void*>(this) == reinterpret_cast<void*>(item.handler)) {
// This is our internal completion.
DCHECK(!item.bytes_transfered);
InterlockedExchange(&have_work_, 0);
diff --git a/chromium/base/message_loop/message_pump_win.h b/chromium/base/message_loop/message_pump_win.h
index 00a1e77e279..9f1838d6e4f 100644
--- a/chromium/base/message_loop/message_pump_win.h
+++ b/chromium/base/message_loop/message_pump_win.h
@@ -78,7 +78,7 @@ class BASE_EXPORT MessagePumpWin : public MessagePump {
// message. MessageLoop extends that bare-bones message pump to also service
// Tasks, at the cost of some complexity.
//
-// The basic structure of the extension (refered to as a sub-pump) is that a
+// The basic structure of the extension (referred to as a sub-pump) is that a
// special message, kMsgHaveWork, is repeatedly injected into the Windows
// Message queue. Each time the kMsgHaveWork message is peeked, checks are
// made for an extended set of events, including the availability of Tasks to
diff --git a/chromium/base/metrics/BUILD.gn b/chromium/base/metrics/BUILD.gn
index 845ac4f79c8..159dfd49b29 100644
--- a/chromium/base/metrics/BUILD.gn
+++ b/chromium/base/metrics/BUILD.gn
@@ -33,10 +33,6 @@ source_set("metrics") {
"user_metrics_action.h",
]
- if (is_nacl) {
- sources -= [ "field_trial.cc" ]
- }
-
configs += [ "//base:base_implementation" ]
deps = [
diff --git a/chromium/base/metrics/bucket_ranges.h b/chromium/base/metrics/bucket_ranges.h
index fe1152f5dbc..6ea5849370c 100644
--- a/chromium/base/metrics/bucket_ranges.h
+++ b/chromium/base/metrics/bucket_ranges.h
@@ -21,7 +21,6 @@
#include "base/base_export.h"
#include "base/basictypes.h"
-#include "base/gtest_prod_util.h"
#include "base/metrics/histogram_base.h"
namespace base {
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 639f6d38e24..2ad7517a4bd 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -13,7 +13,6 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/sys_byteorder.h"
namespace base {
@@ -140,6 +139,11 @@ const std::string& FieldTrial::group_name() {
return group_name_;
}
+const std::string& FieldTrial::GetGroupNameWithoutActivation() {
+ FinalizeGroupChoice();
+ return group_name_;
+}
+
void FieldTrial::SetForced() {
// We might have been forced before (e.g., by CreateFieldTrial) and it's
// first come first served, e.g., command line switch has precedence.
@@ -355,32 +359,39 @@ FieldTrial* FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
}
// static
-FieldTrial* FieldTrialList::Find(const std::string& name) {
+FieldTrial* FieldTrialList::Find(const std::string& trial_name) {
if (!global_)
return NULL;
AutoLock auto_lock(global_->lock_);
- return global_->PreLockedFind(name);
+ return global_->PreLockedFind(trial_name);
}
// static
-int FieldTrialList::FindValue(const std::string& name) {
- FieldTrial* field_trial = Find(name);
+int FieldTrialList::FindValue(const std::string& trial_name) {
+ FieldTrial* field_trial = Find(trial_name);
if (field_trial)
return field_trial->group();
return FieldTrial::kNotFinalized;
}
// static
-std::string FieldTrialList::FindFullName(const std::string& name) {
- FieldTrial* field_trial = Find(name);
+std::string FieldTrialList::FindFullName(const std::string& trial_name) {
+ FieldTrial* field_trial = Find(trial_name);
if (field_trial)
return field_trial->group_name();
return std::string();
}
// static
-bool FieldTrialList::TrialExists(const std::string& name) {
- return Find(name) != NULL;
+bool FieldTrialList::TrialExists(const std::string& trial_name) {
+ return Find(trial_name) != NULL;
+}
+
+// static
+bool FieldTrialList::IsTrialActive(const std::string& trial_name) {
+ FieldTrial* field_trial = Find(trial_name);
+ FieldTrial::ActiveGroup active_group;
+ return field_trial && field_trial->GetActiveGroup(&active_group);
}
// static
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index 26257ab4a89..899d89a5742 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -149,6 +149,11 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// is used as the group name. This causes a winner to be chosen if none was.
const std::string& group_name();
+ // Finalizes the group choice and returns the chosen group, but does not mark
+ // the trial as active - so its state will not be reported until group_name()
+ // or similar is called.
+ const std::string& GetGroupNameWithoutActivation();
+
// Set the field trial as forced, meaning that it was setup earlier than
// the hard coded registration of the field trial to override it.
// This allows the code that was hard coded to register the field trial to
@@ -396,20 +401,25 @@ class BASE_EXPORT FieldTrialList {
uint32 randomization_seed,
int* default_group_number);
- // The Find() method can be used to test to see if a named Trial was already
+ // The Find() method can be used to test to see if a named trial was already
// registered, or to retrieve a pointer to it from the global map.
- static FieldTrial* Find(const std::string& name);
+ static FieldTrial* Find(const std::string& trial_name);
// Returns the group number chosen for the named trial, or
// FieldTrial::kNotFinalized if the trial does not exist.
- static int FindValue(const std::string& name);
+ static int FindValue(const std::string& trial_name);
- // Returns the group name chosen for the named trial, or the
- // empty string if the trial does not exist.
- static std::string FindFullName(const std::string& name);
+ // Returns the group name chosen for the named trial, or the empty string if
+ // the trial does not exist. The first call of this function on a given field
+ // trial will mark it as active, so that its state will be reported with usage
+ // metrics, crashes, etc.
+ static std::string FindFullName(const std::string& trial_name);
// Returns true if the named trial has been registered.
- static bool TrialExists(const std::string& name);
+ static bool TrialExists(const std::string& trial_name);
+
+ // Returns true if the named trial exists and has been activated.
+ static bool IsTrialActive(const std::string& trial_name);
// Creates a persistent representation of active FieldTrial instances for
// resurrection in another process. This allows randomization to be done in
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index ed84d865d5e..6cecc00fd8a 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -72,6 +72,8 @@ class FieldTrialTest : public testing::Test {
private:
MessageLoop message_loop_;
FieldTrialList trial_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(FieldTrialTest);
};
// Test registration, and also check that destructors are called for trials
@@ -376,6 +378,28 @@ TEST_F(FieldTrialTest, ActiveGroupsNotFinalized) {
EXPECT_EQ(active_group.group_name, active_groups[0].group_name);
}
+TEST_F(FieldTrialTest, GetGroupNameWithoutActivation) {
+ const char kTrialName[] = "TestTrial";
+ const char kSecondaryGroupName[] = "SecondaryGroup";
+
+ int default_group = -1;
+ scoped_refptr<FieldTrial> trial =
+ CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+ trial->AppendGroup(kSecondaryGroupName, 50);
+
+ // The trial should start inactive.
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+ // Calling |GetGroupNameWithoutActivation()| should not activate the trial.
+ std::string group_name = trial->GetGroupNameWithoutActivation();
+ EXPECT_FALSE(group_name.empty());
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+
+ // Calling |group_name()| should activate it and return the same group name.
+ EXPECT_EQ(group_name, trial->group_name());
+ EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+}
+
TEST_F(FieldTrialTest, Save) {
std::string save_string;
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 7dfb552844d..b37bc4c4685 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -272,6 +272,10 @@ bool Histogram::HasConstructionArguments(Sample expected_minimum,
}
void Histogram::Add(int value) {
+ AddCount(value, 1);
+}
+
+void Histogram::AddCount(int value, int count) {
DCHECK_EQ(0, ranges(0));
DCHECK_EQ(kSampleType_MAX, ranges(bucket_count()));
@@ -279,7 +283,13 @@ void Histogram::Add(int value) {
value = kSampleType_MAX - 1;
if (value < 0)
value = 0;
- samples_->Accumulate(value, 1);
+ if (count <= 0) {
+ NOTREACHED();
+ return;
+ }
+ samples_->Accumulate(value, count);
+
+ FindAndRunCallback(value);
}
scoped_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h
index 58bc0297aba..1f6e2a1ceeb 100644
--- a/chromium/base/metrics/histogram.h
+++ b/chromium/base/metrics/histogram.h
@@ -181,6 +181,7 @@ class BASE_EXPORT Histogram : public HistogramBase {
Sample expected_maximum,
size_t expected_bucket_count) const override;
void Add(Sample value) override;
+ void AddCount(Sample value, int count) override;
scoped_ptr<HistogramSamples> SnapshotSamples() const override;
void AddSamples(const HistogramSamples& samples) override;
bool AddSamplesFromPickle(base::PickleIterator* iter) override;
@@ -218,6 +219,7 @@ class BASE_EXPORT Histogram : public HistogramBase {
FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptBucketBounds);
FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
FRIEND_TEST_ALL_PREFIXES(HistogramTest, NameMatchTest);
+ FRIEND_TEST_ALL_PREFIXES(HistogramTest, AddCountTest);
friend class StatisticsRecorder; // To allow it to delete duplicates.
friend class StatisticsRecorderTest;
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index de34c79d4b4..6b3f69c2c04 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -12,6 +12,7 @@
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/stringprintf.h"
@@ -117,6 +118,16 @@ void HistogramBase::WriteJSON(std::string* output) const {
serializer.Serialize(root);
}
+void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
+ if ((flags_ & kCallbackExists) == 0)
+ return;
+
+ StatisticsRecorder::OnSampleCallback cb =
+ StatisticsRecorder::FindCallback(histogram_name());
+ if (!cb.is_null())
+ cb.Run(sample);
+}
+
void HistogramBase::WriteAsciiBucketGraph(double current_size,
double max_size,
std::string* output) const {
diff --git a/chromium/base/metrics/histogram_base.h b/chromium/base/metrics/histogram_base.h
index 9979d470ff5..304e3e03b6b 100644
--- a/chromium/base/metrics/histogram_base.h
+++ b/chromium/base/metrics/histogram_base.h
@@ -74,6 +74,12 @@ class BASE_EXPORT HistogramBase {
// the source histogram!).
kIPCSerializationSourceFlag = 0x10,
+ // Indicates that a callback exists for when a new sample is recorded on
+ // this histogram. We store this as a flag with the histogram since
+ // histograms can be in performance critical code, and this allows us
+ // to shortcut looking up the callback if it doesn't exist.
+ kCallbackExists = 0x20,
+
// Only for Histogram and its sub classes: fancy bucket-naming support.
kHexRangePrintingFlag = 0x8000,
};
@@ -115,6 +121,12 @@ class BASE_EXPORT HistogramBase {
virtual void Add(Sample value) = 0;
+ // In Add function the |value| bucket is increased by one, but in some use
+ // cases we need to increase this value by an arbitrary integer. AddCount
+ // function increases the |value| bucket by |count|. |count| should be greater
+ // than or equal to 1.
+ virtual void AddCount(Sample value, int count) = 0;
+
// 2 convenient functions that call Add(Sample).
void AddTime(const TimeDelta& time);
void AddBoolean(bool value);
@@ -172,6 +184,10 @@ class BASE_EXPORT HistogramBase {
double scaled_sum,
std::string* output) const;
+ // Retrieves the callback for this histogram, if one exists, and runs it
+ // passing |sample| as the parameter.
+ void FindAndRunCallback(Sample sample) const;
+
private:
const std::string histogram_name_;
int32_t flags_;
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index 988e942caf6..882c58ac44b 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -206,6 +206,9 @@
#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000, 50)
+#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample) \
+ UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
+
#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
diff --git a/chromium/base/metrics/histogram_unittest.cc b/chromium/base/metrics/histogram_unittest.cc
index f189267f358..b144379050b 100644
--- a/chromium/base/metrics/histogram_unittest.cc
+++ b/chromium/base/metrics/histogram_unittest.cc
@@ -231,6 +231,30 @@ TEST_F(HistogramTest, CustomHistogramWithOnly2Buckets) {
EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
}
+// Test the AddCount function.
+TEST_F(HistogramTest, AddCountTest) {
+ const size_t kBucketCount = 50;
+ Histogram* histogram = static_cast<Histogram*>(
+ Histogram::FactoryGet("AddCountHistogram", 10, 100, kBucketCount,
+ HistogramBase::kNoFlags));
+
+ histogram->AddCount(20, 15);
+ histogram->AddCount(30, 14);
+
+ scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+ EXPECT_EQ(29, samples->TotalCount());
+ EXPECT_EQ(15, samples->GetCount(20));
+ EXPECT_EQ(14, samples->GetCount(30));
+
+ histogram->AddCount(20, 25);
+ histogram->AddCount(30, 24);
+
+ scoped_ptr<SampleVector> samples2 = histogram->SnapshotSampleVector();
+ EXPECT_EQ(78, samples2->TotalCount());
+ EXPECT_EQ(40, samples2->GetCount(20));
+ EXPECT_EQ(38, samples2->GetCount(30));
+}
+
// Make sure histogram handles out-of-bounds data gracefully.
TEST_F(HistogramTest, BoundsTest) {
const size_t kBucketCount = 50;
diff --git a/chromium/base/metrics/sparse_histogram.cc b/chromium/base/metrics/sparse_histogram.cc
index e5cdb43c04f..39c276d7877 100644
--- a/chromium/base/metrics/sparse_histogram.cc
+++ b/chromium/base/metrics/sparse_histogram.cc
@@ -46,8 +46,20 @@ bool SparseHistogram::HasConstructionArguments(
}
void SparseHistogram::Add(Sample value) {
- base::AutoLock auto_lock(lock_);
- samples_.Accumulate(value, 1);
+ AddCount(value, 1);
+}
+
+void SparseHistogram::AddCount(Sample value, int count) {
+ if (count <= 0) {
+ NOTREACHED();
+ return;
+ }
+ {
+ base::AutoLock auto_lock(lock_);
+ samples_.Accumulate(value, count);
+ }
+
+ FindAndRunCallback(value);
}
scoped_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
diff --git a/chromium/base/metrics/sparse_histogram.h b/chromium/base/metrics/sparse_histogram.h
index a02da592d6f..3abd0805369 100644
--- a/chromium/base/metrics/sparse_histogram.h
+++ b/chromium/base/metrics/sparse_histogram.h
@@ -42,6 +42,7 @@ class BASE_EXPORT_PRIVATE SparseHistogram : public HistogramBase {
Sample expected_maximum,
size_t expected_bucket_count) const override;
void Add(Sample value) override;
+ void AddCount(Sample value, int count) override;
void AddSamples(const HistogramSamples& samples) override;
bool AddSamplesFromPickle(base::PickleIterator* iter) override;
scoped_ptr<HistogramSamples> SnapshotSamples() const override;
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index fca4d59e509..83cf5d37247 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -62,6 +62,25 @@ TEST_F(SparseHistogramTest, BasicTest) {
EXPECT_EQ(1, snapshot2->GetCount(101));
}
+TEST_F(SparseHistogramTest, BasicTestAddCount) {
+ scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+ scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+ EXPECT_EQ(0, snapshot->TotalCount());
+ EXPECT_EQ(0, snapshot->sum());
+
+ histogram->AddCount(100, 15);
+ scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+ EXPECT_EQ(15, snapshot1->TotalCount());
+ EXPECT_EQ(15, snapshot1->GetCount(100));
+
+ histogram->AddCount(100, 15);
+ histogram->AddCount(101, 25);
+ scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+ EXPECT_EQ(55, snapshot2->TotalCount());
+ EXPECT_EQ(30, snapshot2->GetCount(100));
+ EXPECT_EQ(25, snapshot2->GetCount(101));
+}
+
TEST_F(SparseHistogramTest, MacroBasicTest) {
UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 200);
diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc
index f14f037b9e7..87ffa3dbcd0 100644
--- a/chromium/base/metrics/statistics_recorder.cc
+++ b/chromium/base/metrics/statistics_recorder.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/values.h"
@@ -61,6 +62,15 @@ HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
if (histograms_->end() == it) {
(*histograms_)[HistogramNameRef(name)] = histogram;
ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
+ // If there are callbacks for this histogram, we set the kCallbackExists
+ // flag.
+ auto callback_iterator = callbacks_->find(name);
+ if (callback_iterator != callbacks_->end()) {
+ if (!callback_iterator->second.is_null())
+ histogram->SetFlags(HistogramBase::kCallbackExists);
+ else
+ histogram->ClearFlags(HistogramBase::kCallbackExists);
+ }
histogram_to_return = histogram;
} else if (histogram == it->second) {
// The histogram was registered before.
@@ -225,6 +235,58 @@ HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
return it->second;
}
+// static
+bool StatisticsRecorder::SetCallback(
+ const std::string& name,
+ const StatisticsRecorder::OnSampleCallback& cb) {
+ DCHECK(!cb.is_null());
+ if (lock_ == NULL)
+ return false;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return false;
+
+ if (ContainsKey(*callbacks_, name))
+ return false;
+ callbacks_->insert(std::make_pair(name, cb));
+
+ auto histogram_iterator = histograms_->find(HistogramNameRef(name));
+ if (histogram_iterator != histograms_->end())
+ histogram_iterator->second->SetFlags(HistogramBase::kCallbackExists);
+
+ return true;
+}
+
+// static
+void StatisticsRecorder::ClearCallback(const std::string& name) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return;
+
+ callbacks_->erase(name);
+
+ // We also clear the flag from the histogram (if it exists).
+ auto histogram_iterator = histograms_->find(HistogramNameRef(name));
+ if (histogram_iterator != histograms_->end())
+ histogram_iterator->second->ClearFlags(HistogramBase::kCallbackExists);
+}
+
+// static
+StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
+ const std::string& name) {
+ if (lock_ == NULL)
+ return OnSampleCallback();
+ base::AutoLock auto_lock(*lock_);
+ if (histograms_ == NULL)
+ return OnSampleCallback();
+
+ auto callback_iterator = callbacks_->find(name);
+ return callback_iterator != callbacks_->end() ? callback_iterator->second
+ : OnSampleCallback();
+}
+
// private static
void StatisticsRecorder::GetSnapshot(const std::string& query,
Histograms* snapshot) {
@@ -256,6 +318,7 @@ StatisticsRecorder::StatisticsRecorder() {
}
base::AutoLock auto_lock(*lock_);
histograms_ = new HistogramMap;
+ callbacks_ = new CallbackMap;
ranges_ = new RangesMap;
if (VLOG_IS_ON(1))
@@ -274,14 +337,17 @@ StatisticsRecorder::~StatisticsRecorder() {
// Clean up.
scoped_ptr<HistogramMap> histograms_deleter;
+ scoped_ptr<CallbackMap> callbacks_deleter;
scoped_ptr<RangesMap> ranges_deleter;
// We don't delete lock_ on purpose to avoid having to properly protect
// against it going away after we checked for NULL in the static methods.
{
base::AutoLock auto_lock(*lock_);
histograms_deleter.reset(histograms_);
+ callbacks_deleter.reset(callbacks_);
ranges_deleter.reset(ranges_);
histograms_ = NULL;
+ callbacks_ = NULL;
ranges_ = NULL;
}
// We are going to leak the histograms and the ranges.
@@ -291,6 +357,8 @@ StatisticsRecorder::~StatisticsRecorder() {
// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
+StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
+// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index 545c4663c52..0e5168f4137 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -17,13 +17,14 @@
#include "base/base_export.h"
#include "base/basictypes.h"
+#include "base/callback.h"
#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
+#include "base/metrics/histogram_base.h"
namespace base {
class BucketRanges;
-class HistogramBase;
class Lock;
class BASE_EXPORT StatisticsRecorder {
@@ -75,6 +76,24 @@ class BASE_EXPORT StatisticsRecorder {
// histograms).
static void GetSnapshot(const std::string& query, Histograms* snapshot);
+ typedef base::Callback<void(HistogramBase::Sample)> OnSampleCallback;
+
+ // SetCallback sets the callback to notify when a new sample is recorded on
+ // the histogram referred to by |histogram_name|. The call to this method can
+ // be be done before or after the histogram is created. This method is thread
+ // safe. The return value is whether or not the callback was successfully set.
+ static bool SetCallback(const std::string& histogram_name,
+ const OnSampleCallback& callback);
+
+ // ClearCallback clears any callback set on the histogram referred to by
+ // |histogram_name|. This method is thread safe.
+ static void ClearCallback(const std::string& histogram_name);
+
+ // FindCallback retrieves the callback for the histogram referred to by
+ // |histogram_name|, or a null callback if no callback exists for this
+ // histogram. This method is thread safe.
+ static OnSampleCallback FindCallback(const std::string& histogram_name);
+
private:
// HistogramNameRef holds a weak const ref to the name field of the associated
// Histogram object, allowing re-use of the underlying string storage for the
@@ -95,6 +114,10 @@ class BASE_EXPORT StatisticsRecorder {
// We keep all registered histograms in a map, from name to histogram.
typedef std::map<HistogramNameRef, HistogramBase*> HistogramMap;
+ // We keep a map of callbacks to histograms, so that as histograms are
+ // created, we can set the callback properly.
+ typedef std::map<std::string, OnSampleCallback> CallbackMap;
+
// We keep all |bucket_ranges_| in a map, from checksum to a list of
// |bucket_ranges_|. Checksum is calculated from the |ranges_| in
// |bucket_ranges_|.
@@ -119,6 +142,7 @@ class BASE_EXPORT StatisticsRecorder {
static void DumpHistogramsToVlog(void* instance);
static HistogramMap* histograms_;
+ static CallbackMap* callbacks_;
static RangesMap* ranges_;
// Lock protects access to above maps.
diff --git a/chromium/base/metrics/statistics_recorder_unittest.cc b/chromium/base/metrics/statistics_recorder_unittest.cc
index 25988e80bfe..b18c5803893 100644
--- a/chromium/base/metrics/statistics_recorder_unittest.cc
+++ b/chromium/base/metrics/statistics_recorder_unittest.cc
@@ -4,9 +4,11 @@
#include <vector>
+#include "base/bind.h"
#include "base/json/json_reader.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram_macros.h"
+#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -312,4 +314,178 @@ TEST_F(StatisticsRecorderTest, ToJSON) {
EXPECT_TRUE(json.empty());
}
+namespace {
+
+// CallbackCheckWrapper is simply a convenient way to check and store that
+// a callback was actually run.
+struct CallbackCheckWrapper {
+ CallbackCheckWrapper() : called(false), last_histogram_value(0) {}
+
+ void OnHistogramChanged(base::HistogramBase::Sample histogram_value) {
+ called = true;
+ last_histogram_value = histogram_value;
+ }
+
+ bool called;
+ base::HistogramBase::Sample last_histogram_value;
+};
+
+} // namespace
+
+// Check that you can't overwrite the callback with another.
+TEST_F(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
+ CallbackCheckWrapper callback_wrapper;
+
+ bool result = base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+ EXPECT_TRUE(result);
+
+ result = base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+ EXPECT_FALSE(result);
+}
+
+// Check that you can't overwrite the callback with another.
+TEST_F(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
+ HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+ HistogramBase::kNoFlags);
+ EXPECT_TRUE(histogram);
+
+ CallbackCheckWrapper callback_wrapper;
+
+ bool result = base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+ base::HistogramBase::kCallbackExists);
+
+ result = base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+ EXPECT_FALSE(result);
+ EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+ base::HistogramBase::kCallbackExists);
+
+ histogram->Add(1);
+
+ EXPECT_TRUE(callback_wrapper.called);
+}
+
+// Check that you can't overwrite the callback with another.
+TEST_F(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
+ HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+ HistogramBase::kNoFlags);
+ EXPECT_TRUE(histogram);
+
+ CallbackCheckWrapper callback_wrapper;
+
+ bool result = base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+ EXPECT_TRUE(result);
+ EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
+ base::HistogramBase::kCallbackExists);
+
+ base::StatisticsRecorder::ClearCallback("TestHistogram");
+ EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists, 0);
+
+ histogram->Add(1);
+
+ EXPECT_FALSE(callback_wrapper.called);
+}
+
+// Check that callback is used.
+TEST_F(StatisticsRecorderTest, CallbackUsedTest) {
+ {
+ HistogramBase* histogram = Histogram::FactoryGet(
+ "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+ EXPECT_TRUE(histogram);
+
+ CallbackCheckWrapper callback_wrapper;
+
+ base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+
+ histogram->Add(1);
+
+ EXPECT_TRUE(callback_wrapper.called);
+ EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+ }
+
+ {
+ HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+ "TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+
+ CallbackCheckWrapper callback_wrapper;
+
+ base::StatisticsRecorder::SetCallback(
+ "TestLinearHistogram",
+ base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+
+ linear_histogram->Add(1);
+
+ EXPECT_TRUE(callback_wrapper.called);
+ EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+ }
+
+ {
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(1);
+ custom_ranges.push_back(5);
+ HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+ "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
+
+ CallbackCheckWrapper callback_wrapper;
+
+ base::StatisticsRecorder::SetCallback(
+ "TestCustomHistogram",
+ base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+
+ custom_histogram->Add(1);
+
+ EXPECT_TRUE(callback_wrapper.called);
+ EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+ }
+
+ {
+ HistogramBase* custom_histogram = SparseHistogram::FactoryGet(
+ "TestSparseHistogram", HistogramBase::kNoFlags);
+
+ CallbackCheckWrapper callback_wrapper;
+
+ base::StatisticsRecorder::SetCallback(
+ "TestSparseHistogram",
+ base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+
+ custom_histogram->Add(1);
+
+ EXPECT_TRUE(callback_wrapper.called);
+ EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+ }
+}
+
+// Check that setting a callback before the histogram exists works.
+TEST_F(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
+ CallbackCheckWrapper callback_wrapper;
+
+ base::StatisticsRecorder::SetCallback(
+ "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
+ base::Unretained(&callback_wrapper)));
+
+ HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+ HistogramBase::kNoFlags);
+ EXPECT_TRUE(histogram);
+ histogram->Add(1);
+
+ EXPECT_TRUE(callback_wrapper.called);
+ EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
+}
+
} // namespace base
diff --git a/chromium/base/nix/xdg_util.cc b/chromium/base/nix/xdg_util.cc
index ef045617761..f76c0cb38b1 100644
--- a/chromium/base/nix/xdg_util.cc
+++ b/chromium/base/nix/xdg_util.cc
@@ -15,8 +15,8 @@
namespace {
-// The KDE session version environment variable used in KDE 4.
-const char kKDE4SessionEnvVar[] = "KDE_SESSION_VERSION";
+// The KDE session version environment variable introduced in KDE 4.
+const char kKDESessionEnvVar[] = "KDE_SESSION_VERSION";
} // namespace
@@ -69,6 +69,12 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
} else if (xdg_current_desktop == "GNOME") {
return DESKTOP_ENVIRONMENT_GNOME;
} else if (xdg_current_desktop == "KDE") {
+ std::string kde_session;
+ if (env->GetVar(kKDESessionEnvVar, &kde_session)) {
+ if (kde_session == "5") {
+ return DESKTOP_ENVIRONMENT_KDE5;
+ }
+ }
return DESKTOP_ENVIRONMENT_KDE4;
}
}
@@ -82,7 +88,7 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
return DESKTOP_ENVIRONMENT_KDE4;
} else if (desktop_session == "kde") {
// This may mean KDE4 on newer systems, so we have to check.
- if (env->HasVar(kKDE4SessionEnvVar))
+ if (env->HasVar(kKDESessionEnvVar))
return DESKTOP_ENVIRONMENT_KDE4;
return DESKTOP_ENVIRONMENT_KDE3;
} else if (desktop_session.find("xfce") != std::string::npos ||
@@ -96,7 +102,7 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
if (env->HasVar("GNOME_DESKTOP_SESSION_ID")) {
return DESKTOP_ENVIRONMENT_GNOME;
} else if (env->HasVar("KDE_FULL_SESSION")) {
- if (env->HasVar(kKDE4SessionEnvVar))
+ if (env->HasVar(kKDESessionEnvVar))
return DESKTOP_ENVIRONMENT_KDE4;
return DESKTOP_ENVIRONMENT_KDE3;
}
@@ -114,6 +120,8 @@ const char* GetDesktopEnvironmentName(DesktopEnvironment env) {
return "KDE3";
case DESKTOP_ENVIRONMENT_KDE4:
return "KDE4";
+ case DESKTOP_ENVIRONMENT_KDE5:
+ return "KDE5";
case DESKTOP_ENVIRONMENT_UNITY:
return "UNITY";
case DESKTOP_ENVIRONMENT_XFCE:
diff --git a/chromium/base/nix/xdg_util.h b/chromium/base/nix/xdg_util.h
index a8b778405f2..8812c61f454 100644
--- a/chromium/base/nix/xdg_util.h
+++ b/chromium/base/nix/xdg_util.h
@@ -48,10 +48,11 @@ BASE_EXPORT FilePath GetXDGUserDirectory(const char* dir_name,
enum DesktopEnvironment {
DESKTOP_ENVIRONMENT_OTHER,
DESKTOP_ENVIRONMENT_GNOME,
- // KDE3 and KDE4 are sufficiently different that we count
- // them as two different desktop environments here.
+ // KDE3, KDE4 and KDE5 are sufficiently different that we count
+ // them as different desktop environments here.
DESKTOP_ENVIRONMENT_KDE3,
DESKTOP_ENVIRONMENT_KDE4,
+ DESKTOP_ENVIRONMENT_KDE5,
DESKTOP_ENVIRONMENT_UNITY,
DESKTOP_ENVIRONMENT_XFCE,
};
diff --git a/chromium/base/nix/xdg_util_unittest.cc b/chromium/base/nix/xdg_util_unittest.cc
index 136eb5d745d..a05435545ab 100644
--- a/chromium/base/nix/xdg_util_unittest.cc
+++ b/chromium/base/nix/xdg_util_unittest.cc
@@ -35,8 +35,10 @@ const char* const kDesktopXFCE = "xfce";
const char* const kXdgDesktopGNOME = "GNOME";
const char* const kXdgDesktopKDE = "KDE";
const char* const kXdgDesktopUnity = "Unity";
+const char* const kKDESessionKDE5 = "5";
const char kDesktopSession[] = "DESKTOP_SESSION";
+const char kKDESession[] = "KDE_SESSION_VERSION";
const char kXdgDesktop[] = "XDG_CURRENT_DESKTOP";
} // namespace
@@ -107,6 +109,17 @@ TEST(XDGUtilTest, GetXdgDesktopGnomeFallback) {
EXPECT_EQ(DESKTOP_ENVIRONMENT_GNOME, GetDesktopEnvironment(&getter));
}
+TEST(XDGUtilTest, GetXdgDesktopKDE5) {
+ MockEnvironment getter;
+ EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+ EXPECT_CALL(getter, GetVar(StrEq(kXdgDesktop), _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kXdgDesktopKDE), Return(true)));
+ EXPECT_CALL(getter, GetVar(StrEq(kKDESession), _))
+ .WillOnce(DoAll(SetArgumentPointee<1>(kKDESessionKDE5), Return(true)));
+
+ EXPECT_EQ(DESKTOP_ENVIRONMENT_KDE5, GetDesktopEnvironment(&getter));
+}
+
TEST(XDGUtilTest, GetXdgDesktopKDE4) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
diff --git a/chromium/base/numerics/safe_conversions.h b/chromium/base/numerics/safe_conversions.h
index 5dd51919595..13c24ce4fcb 100644
--- a/chromium/base/numerics/safe_conversions.h
+++ b/chromium/base/numerics/safe_conversions.h
@@ -20,6 +20,24 @@ inline bool IsValueInRangeForNumericType(Src value) {
internal::RANGE_VALID;
}
+// Convenience function for determining if a numeric value is negative without
+// throwing compiler warnings on: unsigned(value) < 0.
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_signed, bool>::type
+IsValueNegative(T value) {
+ static_assert(std::numeric_limits<T>::is_specialized,
+ "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T>
+typename enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+IsValueNegative(T) {
+ static_assert(std::numeric_limits<T>::is_specialized,
+ "Argument must be numeric.");
+ return false;
+}
+
// checked_cast<> is analogous to static_cast<> for numeric types,
// except that it CHECKs that the specified numeric conversion will not
// overflow or underflow. NaN source will always trigger a CHECK.
diff --git a/chromium/base/numerics/safe_conversions_impl.h b/chromium/base/numerics/safe_conversions_impl.h
index 41570671601..f4bc9161a0d 100644
--- a/chromium/base/numerics/safe_conversions_impl.h
+++ b/chromium/base/numerics/safe_conversions_impl.h
@@ -108,6 +108,55 @@ inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
(is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
}
+// The following helper template addresses a corner case in range checks for
+// conversion from a floating-point type to an integral type of smaller range
+// but larger precision (e.g. float -> unsigned). The problem is as follows:
+// 1. Integral maximum is always one less than a power of two, so it must be
+// truncated to fit the mantissa of the floating point. The direction of
+// rounding is implementation defined, but by default it's always IEEE
+// floats, which round to nearest and thus result in a value of larger
+// magnitude than the integral value.
+// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
+// // is 4294967295u.
+// 2. If the floating point value is equal to the promoted integral maximum
+// value, a range check will erroneously pass.
+// Example: (4294967296f <= 4294967295u) // This is true due to a precision
+// // loss in rounding up to float.
+// 3. When the floating point value is then converted to an integral, the
+// resulting value is out of range for the target integral type and
+// thus is implementation defined.
+// Example: unsigned u = (float)INT_MAX; // u will typically overflow to 0.
+// To fix this bug we manually truncate the maximum value when the destination
+// type is an integral of larger precision than the source floating-point type,
+// such that the resulting maximum is represented exactly as a floating point.
+template <typename Dst, typename Src>
+struct NarrowingRange {
+ typedef typename std::numeric_limits<Src> SrcLimits;
+ typedef typename std::numeric_limits<Dst> DstLimits;
+
+ static Dst max() {
+ // The following logic avoids warnings where the max function is
+ // instantiated with invalid values for a bit shift (even though
+ // such a function can never be called).
+ static const int shift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits && SrcLimits::is_iec559 &&
+ DstLimits::is_integer)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+
+ // We use UINTMAX_C below to avoid compiler warnings about shifting floating
+ // points. Since it's a compile time calculation, it shouldn't have any
+ // performance impact.
+ return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
+ }
+
+ static Dst min() {
+ return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
+ : DstLimits::min();
+ }
+};
+
template <
typename Dst,
typename Src,
@@ -147,11 +196,8 @@ struct DstRangeRelationToSrcRangeImpl<Dst,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static RangeConstraint Check(Src value) {
- return std::numeric_limits<Dst>::is_iec559
- ? GetRangeConstraint((value < std::numeric_limits<Dst>::max()),
- (value > -std::numeric_limits<Dst>::max()))
- : GetRangeConstraint((value < std::numeric_limits<Dst>::max()),
- (value > std::numeric_limits<Dst>::min()));
+ return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
+ (value >= NarrowingRange<Dst, Src>::min()));
}
};
@@ -163,7 +209,7 @@ struct DstRangeRelationToSrcRangeImpl<Dst,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static RangeConstraint Check(Src value) {
- return GetRangeConstraint(value < std::numeric_limits<Dst>::max(), true);
+ return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
}
};
@@ -178,7 +224,7 @@ struct DstRangeRelationToSrcRangeImpl<Dst,
return sizeof(Dst) > sizeof(Src)
? RANGE_VALID
: GetRangeConstraint(
- value < static_cast<Src>(std::numeric_limits<Dst>::max()),
+ value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
true);
}
};
@@ -195,7 +241,7 @@ struct DstRangeRelationToSrcRangeImpl<Dst,
return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
? GetRangeConstraint(true, value >= static_cast<Src>(0))
: GetRangeConstraint(
- value < static_cast<Src>(std::numeric_limits<Dst>::max()),
+ value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
value >= static_cast<Src>(0));
}
};
diff --git a/chromium/base/numerics/safe_math.h b/chromium/base/numerics/safe_math.h
index 1fab032af0f..cd89b2d5246 100644
--- a/chromium/base/numerics/safe_math.h
+++ b/chromium/base/numerics/safe_math.h
@@ -145,6 +145,14 @@ class CheckedNumeric {
return CheckedNumeric<T>(value, validity);
}
+ // This function is available only for integral types. It returns an unsigned
+ // integer of the same width as the source type, containing the absolute value
+ // of the source, and properly handling signed min.
+ CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const {
+ return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
+ CheckedUnsignedAbs(state_.value()), state_.validity());
+ }
+
CheckedNumeric& operator++() {
*this += 1;
return *this;
@@ -188,6 +196,16 @@ class CheckedNumeric {
static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
private:
+ template <typename NumericType>
+ struct UnderlyingType {
+ using type = NumericType;
+ };
+
+ template <typename NumericType>
+ struct UnderlyingType<CheckedNumeric<NumericType>> {
+ using type = NumericType;
+ };
+
CheckedNumericState<T> state_;
};
@@ -224,7 +242,8 @@ class CheckedNumeric {
template <typename T> \
template <typename Src> \
CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) { \
- *this = CheckedNumeric<T>::cast(*this) OP CheckedNumeric<Src>::cast(rhs); \
+ *this = CheckedNumeric<T>::cast(*this) \
+ OP CheckedNumeric<typename UnderlyingType<Src>::type>::cast(rhs); \
return *this; \
} \
/* Binary arithmetic operator for CheckedNumeric of different type. */ \
diff --git a/chromium/base/numerics/safe_math_impl.h b/chromium/base/numerics/safe_math_impl.h
index 08f2e88345f..1bb5c5b83f2 100644
--- a/chromium/base/numerics/safe_math_impl.h
+++ b/chromium/base/numerics/safe_math_impl.h
@@ -90,6 +90,25 @@ struct PositionOfSignBit {
size_t>::type value = 8 * sizeof(Integer) - 1;
};
+// This is used for UnsignedAbs, where we need to support floating-point
+// template instantiations even though we don't actually support the operations.
+// However, there is no corresponding implementation of e.g. CheckedUnsignedAbs,
+// so the float versions will not compile.
+template <typename Numeric,
+ bool IsInteger = std::numeric_limits<Numeric>::is_integer,
+ bool IsFloat = std::numeric_limits<Numeric>::is_iec559>
+struct UnsignedOrFloatForSize;
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, true, false> {
+ typedef typename UnsignedIntegerForSize<Numeric>::type type;
+};
+
+template <typename Numeric>
+struct UnsignedOrFloatForSize<Numeric, false, true> {
+ typedef Numeric type;
+};
+
// Helper templates for integer manipulations.
template <typename T>
@@ -284,11 +303,31 @@ typename enable_if<
std::numeric_limits<T>::is_integer && !std::numeric_limits<T>::is_signed,
T>::type
CheckedAbs(T value, RangeConstraint* validity) {
- // Absolute value of a positive is just its identiy.
+ // T is unsigned, so |value| must already be positive.
*validity = RANGE_VALID;
return value;
}
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<T>::is_signed,
+ typename UnsignedIntegerForSize<T>::type>::type
+CheckedUnsignedAbs(T value) {
+ typedef typename UnsignedIntegerForSize<T>::type UnsignedT;
+ return value == std::numeric_limits<T>::min()
+ ? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1
+ : static_cast<UnsignedT>(std::abs(value));
+}
+
+template <typename T>
+typename enable_if<std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed,
+ T>::type
+CheckedUnsignedAbs(T value) {
+ // T is unsigned, so |value| must already be positive.
+ return value;
+}
+
// These are the floating point stubs that the compiler needs to see. Only the
// negation operation is ever called.
#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
diff --git a/chromium/base/numerics/safe_numerics_unittest.cc b/chromium/base/numerics/safe_numerics_unittest.cc
index 6f9a966c015..81873ea2eba 100644
--- a/chromium/base/numerics/safe_numerics_unittest.cc
+++ b/chromium/base/numerics/safe_numerics_unittest.cc
@@ -18,6 +18,8 @@
using std::numeric_limits;
using base::CheckedNumeric;
using base::checked_cast;
+using base::IsValueInRangeForNumericType;
+using base::IsValueNegative;
using base::SizeT;
using base::StrictNumeric;
using base::saturated_cast;
@@ -27,6 +29,7 @@ using base::internal::RANGE_VALID;
using base::internal::RANGE_INVALID;
using base::internal::RANGE_OVERFLOW;
using base::internal::RANGE_UNDERFLOW;
+using base::internal::SignedIntegerForSize;
using base::enable_if;
// These tests deliberately cause arithmetic overflows. If the compiler is
@@ -36,6 +39,26 @@ using base::enable_if;
#pragma warning(disable:4756)
#endif
+// This is a helper function for finding the maximum value in Src that can be
+// wholy represented as the destination floating-point type.
+template <typename Dst, typename Src>
+Dst GetMaxConvertibleToFloat() {
+ typedef numeric_limits<Dst> DstLimits;
+ typedef numeric_limits<Src> SrcLimits;
+ static_assert(SrcLimits::is_specialized, "Source must be numeric.");
+ static_assert(DstLimits::is_specialized, "Destination must be numeric.");
+ CHECK(DstLimits::is_iec559);
+
+ if (SrcLimits::digits <= DstLimits::digits &&
+ MaxExponent<Src>::value <= MaxExponent<Dst>::value)
+ return SrcLimits::max();
+ Src max = SrcLimits::max() / 2 + (SrcLimits::is_integer ? 1 : 0);
+ while (max != static_cast<Src>(static_cast<Dst>(max))) {
+ max /= 2;
+ }
+ return static_cast<Dst>(max);
+}
+
// Helper macros to wrap displaying the conversion types and line numbers.
#define TEST_EXPECTED_VALIDITY(expected, actual) \
EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).validity()) \
@@ -122,6 +145,13 @@ static void TestSpecializedArithmetic(
CheckedNumeric<Dst>(DstLimits::min()) - 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
+ TEST_EXPECTED_VALIDITY(RANGE_VALID,
+ CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+ TEST_EXPECTED_VALIDITY(
+ RANGE_VALID,
+ CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
+ std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
+ .UnsignedAbs());
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
@@ -370,6 +400,18 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+ if (DstLimits::is_integer) {
+ if (SrcLimits::digits < DstLimits::digits) {
+ TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+ static_cast<Src>(DstLimits::max()));
+ } else {
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+ }
+ TEST_EXPECTED_RANGE(
+ RANGE_VALID,
+ static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ }
} else if (SrcLimits::is_signed) {
TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
@@ -428,6 +470,18 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
+ if (DstLimits::is_integer) {
+ if (SrcLimits::digits < DstLimits::digits) {
+ TEST_EXPECTED_RANGE(RANGE_OVERFLOW,
+ static_cast<Src>(DstLimits::max()));
+ } else {
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::max()));
+ }
+ TEST_EXPECTED_RANGE(
+ RANGE_VALID,
+ static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ }
} else {
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
}
@@ -578,6 +632,18 @@ TEST(SafeNumerics, CastTests) {
EXPECT_TRUE(CheckedNumeric<int>(StrictNumeric<unsigned>(1U)).IsValid());
EXPECT_FALSE(CheckedNumeric<unsigned>(StrictNumeric<int>(-1)).IsValid());
+ EXPECT_TRUE(IsValueNegative(-1));
+ EXPECT_TRUE(IsValueNegative(numeric_limits<int>::min()));
+ EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::min()));
+ EXPECT_TRUE(IsValueNegative(-numeric_limits<double>::max()));
+ EXPECT_FALSE(IsValueNegative(0));
+ EXPECT_FALSE(IsValueNegative(1));
+ EXPECT_FALSE(IsValueNegative(0u));
+ EXPECT_FALSE(IsValueNegative(1u));
+ EXPECT_FALSE(IsValueNegative(numeric_limits<int>::max()));
+ EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::max()));
+ EXPECT_FALSE(IsValueNegative(numeric_limits<double>::max()));
+
// These casts and coercions will fail to compile:
// EXPECT_EQ(0, strict_cast<int>(static_cast<size_t>(0)));
// EXPECT_EQ(0, strict_cast<size_t>(static_cast<int>(0)));
@@ -600,3 +666,101 @@ TEST(SafeNumerics, CastTests) {
EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
}
+TEST(SafeNumerics, IsValueInRangeForNumericType) {
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(2));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(0xffffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0xffffffff)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
+ std::numeric_limits<int64_t>::min()));
+
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(2));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffff));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0x7fffffffu));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0x80000000u));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(0xffffffffu));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x80000000)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
+ static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+ static_cast<int64_t>(std::numeric_limits<int32_t>::min()) - 1));
+ EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
+ std::numeric_limits<int64_t>::min()));
+
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(2));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0xffffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0xffffffff)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
+ EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
+ std::numeric_limits<int64_t>::min()));
+
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(2));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(-1));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffff));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x7fffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0x80000000u));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0xffffffffu));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x80000000)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0xffffffff)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(INT64_C(0x100000000)));
+ EXPECT_TRUE(
+ IsValueInRangeForNumericType<int64_t>(INT64_C(0x7fffffffffffffff)));
+ EXPECT_TRUE(
+ IsValueInRangeForNumericType<int64_t>(UINT64_C(0x7fffffffffffffff)));
+ EXPECT_FALSE(
+ IsValueInRangeForNumericType<int64_t>(UINT64_C(0x8000000000000000)));
+ EXPECT_FALSE(
+ IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+ std::numeric_limits<int32_t>::min()));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+ static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
+ std::numeric_limits<int64_t>::min()));
+}
+
+TEST(SafeNumerics, CompoundNumericOperations) {
+ CheckedNumeric<int> a = 1;
+ CheckedNumeric<int> b = 2;
+ CheckedNumeric<int> c = 3;
+ CheckedNumeric<int> d = 4;
+ a += b;
+ EXPECT_EQ(3, a.ValueOrDie());
+ a -= c;
+ EXPECT_EQ(0, a.ValueOrDie());
+ d /= b;
+ EXPECT_EQ(2, d.ValueOrDie());
+ d *= d;
+ EXPECT_EQ(4, d.ValueOrDie());
+
+ CheckedNumeric<int> too_large = std::numeric_limits<int>::max();
+ EXPECT_TRUE(too_large.IsValid());
+ too_large += d;
+ EXPECT_FALSE(too_large.IsValid());
+ too_large -= d;
+ EXPECT_FALSE(too_large.IsValid());
+ too_large /= d;
+ EXPECT_FALSE(too_large.IsValid());
+}
diff --git a/chromium/base/path_service.cc b/chromium/base/path_service.cc
index 3c437ee7493..97a0ce5c02c 100644
--- a/chromium/base/path_service.cc
+++ b/chromium/base/path_service.cc
@@ -17,27 +17,25 @@
#include "base/logging.h"
#include "base/synchronization/lock.h"
-using base::FilePath;
-using base::MakeAbsoluteFilePath;
-
namespace base {
- bool PathProvider(int key, FilePath* result);
+
+bool PathProvider(int key, FilePath* result);
+
#if defined(OS_WIN)
- bool PathProviderWin(int key, FilePath* result);
+bool PathProviderWin(int key, FilePath* result);
#elif defined(OS_MACOSX)
- bool PathProviderMac(int key, FilePath* result);
+bool PathProviderMac(int key, FilePath* result);
#elif defined(OS_ANDROID)
- bool PathProviderAndroid(int key, FilePath* result);
+bool PathProviderAndroid(int key, FilePath* result);
#elif defined(OS_POSIX)
- // PathProviderPosix is the default path provider on POSIX OSes other than
- // Mac and Android.
- bool PathProviderPosix(int key, FilePath* result);
+// PathProviderPosix is the default path provider on POSIX OSes other than
+// Mac and Android.
+bool PathProviderPosix(int key, FilePath* result);
#endif
-} // namespace base
namespace {
-typedef base::hash_map<int, FilePath> PathMap;
+typedef hash_map<int, FilePath> PathMap;
// We keep a linked list of providers. In a debug build we ensure that no two
// providers claim overlapping keys.
@@ -52,22 +50,22 @@ struct Provider {
};
Provider base_provider = {
- base::PathProvider,
+ PathProvider,
NULL,
#ifndef NDEBUG
- base::PATH_START,
- base::PATH_END,
+ PATH_START,
+ PATH_END,
#endif
true
};
#if defined(OS_WIN)
Provider base_provider_win = {
- base::PathProviderWin,
+ PathProviderWin,
&base_provider,
#ifndef NDEBUG
- base::PATH_WIN_START,
- base::PATH_WIN_END,
+ PATH_WIN_START,
+ PATH_WIN_END,
#endif
true
};
@@ -75,11 +73,11 @@ Provider base_provider_win = {
#if defined(OS_MACOSX)
Provider base_provider_mac = {
- base::PathProviderMac,
+ PathProviderMac,
&base_provider,
#ifndef NDEBUG
- base::PATH_MAC_START,
- base::PATH_MAC_END,
+ PATH_MAC_START,
+ PATH_MAC_END,
#endif
true
};
@@ -87,11 +85,11 @@ Provider base_provider_mac = {
#if defined(OS_ANDROID)
Provider base_provider_android = {
- base::PathProviderAndroid,
+ PathProviderAndroid,
&base_provider,
#ifndef NDEBUG
- base::PATH_ANDROID_START,
- base::PATH_ANDROID_END,
+ PATH_ANDROID_START,
+ PATH_ANDROID_END,
#endif
true
};
@@ -99,11 +97,11 @@ Provider base_provider_android = {
#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
Provider base_provider_posix = {
- base::PathProviderPosix,
+ PathProviderPosix,
&base_provider,
#ifndef NDEBUG
- base::PATH_POSIX_START,
- base::PATH_POSIX_END,
+ PATH_POSIX_START,
+ PATH_POSIX_END,
#endif
true
};
@@ -111,7 +109,7 @@ Provider base_provider_posix = {
struct PathData {
- base::Lock lock;
+ Lock lock;
PathMap cache; // Cache mappings from path key to path value.
PathMap overrides; // Track path overrides.
Provider* providers; // Linked list of path service providers.
@@ -140,7 +138,7 @@ struct PathData {
}
};
-static base::LazyInstance<PathData> g_path_data = LAZY_INSTANCE_INITIALIZER;
+static LazyInstance<PathData> g_path_data = LAZY_INSTANCE_INITIALIZER;
static PathData* GetPathData() {
return g_path_data.Pointer();
@@ -183,15 +181,15 @@ bool PathService::Get(int key, FilePath* result) {
PathData* path_data = GetPathData();
DCHECK(path_data);
DCHECK(result);
- DCHECK_GE(key, base::DIR_CURRENT);
+ DCHECK_GE(key, DIR_CURRENT);
// special case the current directory because it can never be cached
- if (key == base::DIR_CURRENT)
- return base::GetCurrentDirectory(result);
+ if (key == DIR_CURRENT)
+ return GetCurrentDirectory(result);
Provider* provider = NULL;
{
- base::AutoLock scoped_lock(path_data->lock);
+ AutoLock scoped_lock(path_data->lock);
if (LockedGetFromCache(key, path_data, result))
return true;
@@ -224,7 +222,7 @@ bool PathService::Get(int key, FilePath* result) {
}
*result = path;
- base::AutoLock scoped_lock(path_data->lock);
+ AutoLock scoped_lock(path_data->lock);
if (!path_data->cache_disabled)
path_data->cache[key] = path;
@@ -245,7 +243,7 @@ bool PathService::OverrideAndCreateIfNeeded(int key,
bool create) {
PathData* path_data = GetPathData();
DCHECK(path_data);
- DCHECK_GT(key, base::DIR_CURRENT) << "invalid path key";
+ DCHECK_GT(key, DIR_CURRENT) << "invalid path key";
FilePath file_path = path;
@@ -255,8 +253,7 @@ bool PathService::OverrideAndCreateIfNeeded(int key,
// Make sure the directory exists. We need to do this before we translate
// this to the absolute path because on POSIX, MakeAbsoluteFilePath fails
// if called on a non-existent path.
- if (!base::PathExists(file_path) &&
- !base::CreateDirectory(file_path))
+ if (!PathExists(file_path) && !CreateDirectory(file_path))
return false;
}
@@ -268,7 +265,7 @@ bool PathService::OverrideAndCreateIfNeeded(int key,
}
DCHECK(file_path.IsAbsolute());
- base::AutoLock scoped_lock(path_data->lock);
+ AutoLock scoped_lock(path_data->lock);
// Clear the cache now. Some of its entries could have depended
// on the value we are overriding, and are now out of sync with reality.
@@ -284,7 +281,7 @@ bool PathService::RemoveOverride(int key) {
PathData* path_data = GetPathData();
DCHECK(path_data);
- base::AutoLock scoped_lock(path_data->lock);
+ AutoLock scoped_lock(path_data->lock);
if (path_data->overrides.find(key) == path_data->overrides.end())
return false;
@@ -315,7 +312,7 @@ void PathService::RegisterProvider(ProviderFunc func, int key_start,
p->key_end = key_end;
#endif
- base::AutoLock scoped_lock(path_data->lock);
+ AutoLock scoped_lock(path_data->lock);
#ifndef NDEBUG
Provider *iter = path_data->providers;
@@ -335,7 +332,9 @@ void PathService::DisableCache() {
PathData* path_data = GetPathData();
DCHECK(path_data);
- base::AutoLock scoped_lock(path_data->lock);
+ AutoLock scoped_lock(path_data->lock);
path_data->cache.clear();
path_data->cache_disabled = true;
}
+
+} // namespace base
diff --git a/chromium/base/path_service.h b/chromium/base/path_service.h
index 025550f2ad1..c7f1abe7149 100644
--- a/chromium/base/path_service.h
+++ b/chromium/base/path_service.h
@@ -13,9 +13,9 @@
#include "build/build_config.h"
namespace base {
+
class FilePath;
class ScopedPathOverride;
-} // namespace base
// The path service is a global table mapping keys to file system paths. It is
// OK to use this service from multiple threads.
@@ -29,7 +29,7 @@ class BASE_EXPORT PathService {
//
// Returns true if the directory or file was successfully retrieved. On
// failure, 'path' will not be changed.
- static bool Get(int key, base::FilePath* path);
+ static bool Get(int key, FilePath* path);
// Overrides the path to a special directory or file. This cannot be used to
// change the value of DIR_CURRENT, but that should be obvious. Also, if the
@@ -44,7 +44,7 @@ class BASE_EXPORT PathService {
//
// Unit tests generally should use ScopedPathOverride instead. Overrides from
// one test should not carry over to another.
- static bool Override(int key, const base::FilePath& path);
+ static bool Override(int key, const FilePath& path);
// This function does the same as PathService::Override but it takes extra
// parameters:
@@ -56,7 +56,7 @@ class BASE_EXPORT PathService {
// - |create| guides whether the directory to be overriden must
// be created in case it doesn't exist already.
static bool OverrideAndCreateIfNeeded(int key,
- const base::FilePath& path,
+ const FilePath& path,
bool is_absolute,
bool create);
@@ -68,7 +68,7 @@ class BASE_EXPORT PathService {
// WARNING: This function could be called on any thread from which the
// PathService is used, so a the ProviderFunc MUST BE THREADSAFE.
//
- typedef bool (*ProviderFunc)(int, base::FilePath*);
+ typedef bool (*ProviderFunc)(int, FilePath*);
// Call to register a path provider. You must specify the range "[key_start,
// key_end)" of supported path keys.
@@ -80,7 +80,7 @@ class BASE_EXPORT PathService {
static void DisableCache();
private:
- friend class base::ScopedPathOverride;
+ friend class ScopedPathOverride;
FRIEND_TEST_ALL_PREFIXES(PathServiceTest, RemoveOverride);
// Removes an override for a special directory or file. Returns true if there
@@ -89,4 +89,9 @@ class BASE_EXPORT PathService {
static bool RemoveOverride(int key);
};
+} // namespace base
+
+// TODO(brettw) Convert all callers to using the base namespace and remove this.
+using base::PathService;
+
#endif // BASE_PATH_SERVICE_H_
diff --git a/chromium/base/path_service_unittest.cc b/chromium/base/path_service_unittest.cc
index 7551d676416..569c0f482e7 100644
--- a/chromium/base/path_service_unittest.cc
+++ b/chromium/base/path_service_unittest.cc
@@ -18,12 +18,14 @@
#include "base/win/windows_version.h"
#endif
+namespace base {
+
namespace {
// Returns true if PathService::Get returns true and sets the path parameter
// to non-empty for the given PathService::DirType enumeration value.
bool ReturnsValidPath(int dir_type) {
- base::FilePath path;
+ FilePath path;
bool result = PathService::Get(dir_type, &path);
// Some paths might not exist on some platforms in which case confirming
@@ -32,25 +34,25 @@ bool ReturnsValidPath(int dir_type) {
#if defined(OS_POSIX)
// If chromium has never been started on this account, the cache path may not
// exist.
- if (dir_type == base::DIR_CACHE)
+ if (dir_type == DIR_CACHE)
check_path_exists = false;
#endif
#if defined(OS_LINUX)
// On the linux try-bots: a path is returned (e.g. /home/chrome-bot/Desktop),
// but it doesn't exist.
- if (dir_type == base::DIR_USER_DESKTOP)
+ if (dir_type == DIR_USER_DESKTOP)
check_path_exists = false;
#endif
#if defined(OS_WIN)
- if (dir_type == base::DIR_TASKBAR_PINS) {
+ if (dir_type == DIR_TASKBAR_PINS) {
// There is no pinned-to-taskbar shortcuts prior to Win7.
if (base::win::GetVersion() < base::win::VERSION_WIN7)
check_path_exists = false;
}
#endif
#if defined(OS_MACOSX)
- if (dir_type != base::DIR_EXE && dir_type != base::DIR_MODULE &&
- dir_type != base::FILE_EXE && dir_type != base::FILE_MODULE) {
+ if (dir_type != DIR_EXE && dir_type != DIR_MODULE &&
+ dir_type != FILE_EXE && dir_type != FILE_MODULE) {
if (path.ReferencesParent())
return false;
}
@@ -58,8 +60,7 @@ bool ReturnsValidPath(int dir_type) {
if (path.ReferencesParent())
return false;
#endif
- return result && !path.empty() && (!check_path_exists ||
- base::PathExists(path));
+ return result && !path.empty() && (!check_path_exists || PathExists(path));
}
#if defined(OS_WIN)
@@ -67,7 +68,7 @@ bool ReturnsValidPath(int dir_type) {
// of Windows. Checks that the function fails and that the returned path is
// empty.
bool ReturnsInvalidPath(int dir_type) {
- base::FilePath path;
+ FilePath path;
bool result = PathService::Get(dir_type, &path);
return !result && path.empty();
}
@@ -84,21 +85,21 @@ typedef PlatformTest PathServiceTest;
// later changes to Get broke the semantics of the function and yielded the
// correct value while returning false.)
TEST_F(PathServiceTest, Get) {
- for (int key = base::PATH_START + 1; key < base::PATH_END; ++key) {
+ for (int key = PATH_START + 1; key < PATH_END; ++key) {
#if defined(OS_ANDROID)
- if (key == base::FILE_MODULE || key == base::DIR_USER_DESKTOP ||
- key == base::DIR_HOME)
+ if (key == FILE_MODULE || key == DIR_USER_DESKTOP ||
+ key == DIR_HOME)
continue; // Android doesn't implement these.
#elif defined(OS_IOS)
- if (key == base::DIR_USER_DESKTOP)
+ if (key == DIR_USER_DESKTOP)
continue; // iOS doesn't implement DIR_USER_DESKTOP;
#endif
EXPECT_PRED1(ReturnsValidPath, key);
}
#if defined(OS_WIN)
- for (int key = base::PATH_WIN_START + 1; key < base::PATH_WIN_END; ++key) {
+ for (int key = PATH_WIN_START + 1; key < PATH_WIN_END; ++key) {
bool valid = true;
- if (key == base::DIR_APP_SHORTCUTS)
+ if (key == DIR_APP_SHORTCUTS)
valid = base::win::GetVersion() >= base::win::VERSION_WIN8;
if (valid)
@@ -107,16 +108,16 @@ TEST_F(PathServiceTest, Get) {
EXPECT_TRUE(ReturnsInvalidPath(key)) << key;
}
#elif defined(OS_MACOSX)
- for (int key = base::PATH_MAC_START + 1; key < base::PATH_MAC_END; ++key) {
+ for (int key = PATH_MAC_START + 1; key < PATH_MAC_END; ++key) {
EXPECT_PRED1(ReturnsValidPath, key);
}
#elif defined(OS_ANDROID)
- for (int key = base::PATH_ANDROID_START + 1; key < base::PATH_ANDROID_END;
+ for (int key = PATH_ANDROID_START + 1; key < PATH_ANDROID_END;
++key) {
EXPECT_PRED1(ReturnsValidPath, key);
}
#elif defined(OS_POSIX)
- for (int key = base::PATH_POSIX_START + 1; key < base::PATH_POSIX_END;
+ for (int key = PATH_POSIX_START + 1; key < PATH_POSIX_END;
++key) {
EXPECT_PRED1(ReturnsValidPath, key);
}
@@ -127,32 +128,32 @@ TEST_F(PathServiceTest, Get) {
// are supposed to do.
TEST_F(PathServiceTest, Override) {
int my_special_key = 666;
- base::ScopedTempDir temp_dir;
+ ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- base::FilePath fake_cache_dir(temp_dir.path().AppendASCII("cache"));
+ FilePath fake_cache_dir(temp_dir.path().AppendASCII("cache"));
// PathService::Override should always create the path provided if it doesn't
// exist.
EXPECT_TRUE(PathService::Override(my_special_key, fake_cache_dir));
- EXPECT_TRUE(base::PathExists(fake_cache_dir));
+ EXPECT_TRUE(PathExists(fake_cache_dir));
- base::FilePath fake_cache_dir2(temp_dir.path().AppendASCII("cache2"));
+ FilePath fake_cache_dir2(temp_dir.path().AppendASCII("cache2"));
// PathService::OverrideAndCreateIfNeeded should obey the |create| parameter.
PathService::OverrideAndCreateIfNeeded(my_special_key,
fake_cache_dir2,
false,
false);
- EXPECT_FALSE(base::PathExists(fake_cache_dir2));
+ EXPECT_FALSE(PathExists(fake_cache_dir2));
EXPECT_TRUE(PathService::OverrideAndCreateIfNeeded(my_special_key,
fake_cache_dir2,
false,
true));
- EXPECT_TRUE(base::PathExists(fake_cache_dir2));
+ EXPECT_TRUE(PathExists(fake_cache_dir2));
#if defined(OS_POSIX)
- base::FilePath non_existent(
- base::MakeAbsoluteFilePath(temp_dir.path()).AppendASCII("non_existent"));
+ FilePath non_existent(
+ MakeAbsoluteFilePath(temp_dir.path()).AppendASCII("non_existent"));
EXPECT_TRUE(non_existent.IsAbsolute());
- EXPECT_FALSE(base::PathExists(non_existent));
+ EXPECT_FALSE(PathExists(non_existent));
#if !defined(OS_ANDROID)
// This fails because MakeAbsoluteFilePath fails for non-existent files.
// Earlier versions of Bionic libc don't fail for non-existent files, so
@@ -169,8 +170,8 @@ TEST_F(PathServiceTest, Override) {
true,
false));
// Check that the path has been overridden and no directory was created.
- EXPECT_FALSE(base::PathExists(non_existent));
- base::FilePath path;
+ EXPECT_FALSE(PathExists(non_existent));
+ FilePath path;
EXPECT_TRUE(PathService::Get(my_special_key, &path));
EXPECT_EQ(non_existent, path);
#endif
@@ -179,62 +180,62 @@ TEST_F(PathServiceTest, Override) {
// Check if multiple overrides can co-exist.
TEST_F(PathServiceTest, OverrideMultiple) {
int my_special_key = 666;
- base::ScopedTempDir temp_dir;
+ ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- base::FilePath fake_cache_dir1(temp_dir.path().AppendASCII("1"));
+ FilePath fake_cache_dir1(temp_dir.path().AppendASCII("1"));
EXPECT_TRUE(PathService::Override(my_special_key, fake_cache_dir1));
- EXPECT_TRUE(base::PathExists(fake_cache_dir1));
- ASSERT_EQ(1, base::WriteFile(fake_cache_dir1.AppendASCII("t1"), ".", 1));
+ EXPECT_TRUE(PathExists(fake_cache_dir1));
+ ASSERT_EQ(1, WriteFile(fake_cache_dir1.AppendASCII("t1"), ".", 1));
- base::FilePath fake_cache_dir2(temp_dir.path().AppendASCII("2"));
+ FilePath fake_cache_dir2(temp_dir.path().AppendASCII("2"));
EXPECT_TRUE(PathService::Override(my_special_key + 1, fake_cache_dir2));
- EXPECT_TRUE(base::PathExists(fake_cache_dir2));
- ASSERT_EQ(1, base::WriteFile(fake_cache_dir2.AppendASCII("t2"), ".", 1));
+ EXPECT_TRUE(PathExists(fake_cache_dir2));
+ ASSERT_EQ(1, WriteFile(fake_cache_dir2.AppendASCII("t2"), ".", 1));
- base::FilePath result;
+ FilePath result;
EXPECT_TRUE(PathService::Get(my_special_key, &result));
// Override might have changed the path representation but our test file
// should be still there.
- EXPECT_TRUE(base::PathExists(result.AppendASCII("t1")));
+ EXPECT_TRUE(PathExists(result.AppendASCII("t1")));
EXPECT_TRUE(PathService::Get(my_special_key + 1, &result));
- EXPECT_TRUE(base::PathExists(result.AppendASCII("t2")));
+ EXPECT_TRUE(PathExists(result.AppendASCII("t2")));
}
TEST_F(PathServiceTest, RemoveOverride) {
// Before we start the test we have to call RemoveOverride at least once to
// clear any overrides that might have been left from other tests.
- PathService::RemoveOverride(base::DIR_TEMP);
+ PathService::RemoveOverride(DIR_TEMP);
- base::FilePath original_user_data_dir;
- EXPECT_TRUE(PathService::Get(base::DIR_TEMP, &original_user_data_dir));
- EXPECT_FALSE(PathService::RemoveOverride(base::DIR_TEMP));
+ FilePath original_user_data_dir;
+ EXPECT_TRUE(PathService::Get(DIR_TEMP, &original_user_data_dir));
+ EXPECT_FALSE(PathService::RemoveOverride(DIR_TEMP));
- base::ScopedTempDir temp_dir;
+ ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
- EXPECT_TRUE(PathService::Override(base::DIR_TEMP, temp_dir.path()));
- base::FilePath new_user_data_dir;
- EXPECT_TRUE(PathService::Get(base::DIR_TEMP, &new_user_data_dir));
+ EXPECT_TRUE(PathService::Override(DIR_TEMP, temp_dir.path()));
+ FilePath new_user_data_dir;
+ EXPECT_TRUE(PathService::Get(DIR_TEMP, &new_user_data_dir));
EXPECT_NE(original_user_data_dir, new_user_data_dir);
- EXPECT_TRUE(PathService::RemoveOverride(base::DIR_TEMP));
- EXPECT_TRUE(PathService::Get(base::DIR_TEMP, &new_user_data_dir));
+ EXPECT_TRUE(PathService::RemoveOverride(DIR_TEMP));
+ EXPECT_TRUE(PathService::Get(DIR_TEMP, &new_user_data_dir));
EXPECT_EQ(original_user_data_dir, new_user_data_dir);
}
#if defined(OS_WIN)
TEST_F(PathServiceTest, GetProgramFiles) {
- base::FilePath programfiles_dir;
+ FilePath programfiles_dir;
#if defined(_WIN64)
// 64-bit on 64-bit.
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILES,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files"));
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILESX86,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILESX86,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files (x86)"));
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILES6432,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES6432,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files"));
@@ -242,29 +243,29 @@ TEST_F(PathServiceTest, GetProgramFiles) {
if (base::win::OSInfo::GetInstance()->wow64_status() ==
base::win::OSInfo::WOW64_ENABLED) {
// 32-bit on 64-bit.
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILES,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files (x86)"));
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILESX86,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILESX86,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files (x86)"));
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILES6432,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES6432,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files"));
} else {
// 32-bit on 32-bit.
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILES,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files"));
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILESX86,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILESX86,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files"));
- EXPECT_TRUE(PathService::Get(base::DIR_PROGRAM_FILES6432,
+ EXPECT_TRUE(PathService::Get(DIR_PROGRAM_FILES6432,
&programfiles_dir));
EXPECT_EQ(programfiles_dir.value(),
FILE_PATH_LITERAL("C:\\Program Files"));
@@ -272,3 +273,5 @@ TEST_F(PathServiceTest, GetProgramFiles) {
#endif
}
#endif
+
+} // namespace base
diff --git a/chromium/base/pickle.cc b/chromium/base/pickle.cc
index cf4a865d35c..489c7f890a7 100644
--- a/chromium/base/pickle.cc
+++ b/chromium/base/pickle.cc
@@ -7,6 +7,10 @@
#include <stdlib.h>
#include <algorithm> // for max()
+#include <limits>
+
+#include "base/bits.h"
+#include "base/macros.h"
namespace base {
@@ -34,7 +38,7 @@ inline bool PickleIterator::ReadBuiltinType(Type* result) {
}
inline void PickleIterator::Advance(size_t size) {
- size_t aligned_size = AlignInt(size, sizeof(uint32_t));
+ size_t aligned_size = bits::Align(size, sizeof(uint32_t));
if (end_index_ - read_index_ < aligned_size) {
read_index_ = end_index_;
} else {
@@ -210,13 +214,15 @@ Pickle::Pickle()
header_size_(sizeof(Header)),
capacity_after_header_(0),
write_offset_(0) {
+ static_assert((Pickle::kPayloadUnit & (Pickle::kPayloadUnit - 1)) == 0,
+ "Pickle::kPayloadUnit must be a power of two");
Resize(kPayloadUnit);
header_->payload_size = 0;
}
Pickle::Pickle(int header_size)
: header_(NULL),
- header_size_(AlignInt(header_size, sizeof(uint32))),
+ header_size_(bits::Align(header_size, sizeof(uint32))),
capacity_after_header_(0),
write_offset_(0) {
DCHECK_GE(static_cast<size_t>(header_size), sizeof(Header));
@@ -236,7 +242,7 @@ Pickle::Pickle(const char* data, int data_len)
if (header_size_ > static_cast<unsigned int>(data_len))
header_size_ = 0;
- if (header_size_ != AlignInt(header_size_, sizeof(uint32)))
+ if (header_size_ != bits::Align(header_size_, sizeof(uint32)))
header_size_ = 0;
// If there is anything wrong with the data, we're not going to use it.
@@ -249,9 +255,8 @@ Pickle::Pickle(const Pickle& other)
header_size_(other.header_size_),
capacity_after_header_(0),
write_offset_(other.write_offset_) {
- size_t payload_size = header_size_ + other.header_->payload_size;
- Resize(payload_size);
- memcpy(header_, other.header_, payload_size);
+ Resize(other.header_->payload_size);
+ memcpy(header_, other.header_, header_size_ + other.header_->payload_size);
}
Pickle::~Pickle() {
@@ -305,7 +310,7 @@ bool Pickle::WriteBytes(const void* data, int length) {
}
void Pickle::Reserve(size_t length) {
- size_t data_len = AlignInt(length, sizeof(uint32));
+ size_t data_len = bits::Align(length, sizeof(uint32));
DCHECK_GE(data_len, length);
#ifdef ARCH_CPU_64_BITS
DCHECK_LE(data_len, kuint32max);
@@ -318,7 +323,7 @@ void Pickle::Reserve(size_t length) {
void Pickle::Resize(size_t new_capacity) {
CHECK_NE(capacity_after_header_, kCapacityReadOnly);
- capacity_after_header_ = AlignInt(new_capacity, kPayloadUnit);
+ capacity_after_header_ = bits::Align(new_capacity, kPayloadUnit);
void* p = realloc(header_, GetTotalAllocatedSize());
CHECK(p);
header_ = reinterpret_cast<Header*>(p);
@@ -334,17 +339,41 @@ size_t Pickle::GetTotalAllocatedSize() const {
const char* Pickle::FindNext(size_t header_size,
const char* start,
const char* end) {
- DCHECK_EQ(header_size, AlignInt(header_size, sizeof(uint32)));
+ size_t pickle_size = 0;
+ if (!PeekNext(header_size, start, end, &pickle_size))
+ return NULL;
+
+ if (pickle_size > static_cast<size_t>(end - start))
+ return NULL;
+
+ return start + pickle_size;
+}
+
+// static
+bool Pickle::PeekNext(size_t header_size,
+ const char* start,
+ const char* end,
+ size_t* pickle_size) {
+ DCHECK_EQ(header_size, bits::Align(header_size, sizeof(uint32)));
+ DCHECK_GE(header_size, sizeof(Header));
DCHECK_LE(header_size, static_cast<size_t>(kPayloadUnit));
size_t length = static_cast<size_t>(end - start);
if (length < sizeof(Header))
- return NULL;
+ return false;
const Header* hdr = reinterpret_cast<const Header*>(start);
- if (length < header_size || length - header_size < hdr->payload_size)
- return NULL;
- return start + header_size + hdr->payload_size;
+ if (length < header_size)
+ return false;
+
+ if (hdr->payload_size > std::numeric_limits<size_t>::max() - header_size) {
+ // If payload_size causes an overflow, we return maximum possible
+ // pickle size to indicate that.
+ *pickle_size = std::numeric_limits<size_t>::max();
+ } else {
+ *pickle_size = header_size + hdr->payload_size;
+ }
+ return true;
}
template <size_t length> void Pickle::WriteBytesStatic(const void* data) {
@@ -359,7 +388,7 @@ inline void Pickle::WriteBytesCommon(const void* data, size_t length) {
DCHECK_NE(kCapacityReadOnly, capacity_after_header_)
<< "oops: pickle is readonly";
MSAN_CHECK_MEM_IS_INITIALIZED(data, length);
- size_t data_len = AlignInt(length, sizeof(uint32));
+ size_t data_len = bits::Align(length, sizeof(uint32));
DCHECK_GE(data_len, length);
#ifdef ARCH_CPU_64_BITS
DCHECK_LE(data_len, kuint32max);
@@ -367,7 +396,11 @@ inline void Pickle::WriteBytesCommon(const void* data, size_t length) {
DCHECK_LE(write_offset_, kuint32max - data_len);
size_t new_size = write_offset_ + data_len;
if (new_size > capacity_after_header_) {
- Resize(std::max(capacity_after_header_ * 2, new_size));
+ size_t new_capacity = capacity_after_header_ * 2;
+ const size_t kPickleHeapAlign = 4096;
+ if (new_capacity > kPickleHeapAlign)
+ new_capacity = bits::Align(new_capacity, kPickleHeapAlign) - kPayloadUnit;
+ Resize(std::max(new_capacity, new_size));
}
char* write = mutable_payload() + write_offset_;
diff --git a/chromium/base/pickle.h b/chromium/base/pickle.h
index c9fef715a7b..22b8055cbae 100644
--- a/chromium/base/pickle.h
+++ b/chromium/base/pickle.h
@@ -74,11 +74,6 @@ class BASE_EXPORT PickleIterator {
}
private:
- // Aligns 'i' by rounding it up to the next multiple of 'alignment'.
- static size_t AlignInt(size_t i, int alignment) {
- return i + (alignment - (i % alignment)) % alignment;
- }
-
// Read Type from Pickle.
template <typename Type>
bool ReadBuiltinType(Type* result);
@@ -270,17 +265,24 @@ class BASE_EXPORT Pickle {
// of the header.
void Resize(size_t new_capacity);
- // Aligns 'i' by rounding it up to the next multiple of 'alignment'
- static size_t AlignInt(size_t i, int alignment) {
- return i + (alignment - (i % alignment)) % alignment;
- }
-
// Find the end of the pickled data that starts at range_start. Returns NULL
// if the entire Pickle is not found in the given data range.
static const char* FindNext(size_t header_size,
const char* range_start,
const char* range_end);
+ // Parse pickle header and return total size of the pickle. Data range
+ // doesn't need to contain entire pickle.
+ // Returns true if pickle header was found and parsed. Callers must check
+ // returned |pickle_size| for sanity (against maximum message size, etc).
+ // NOTE: when function successfully parses a header, but encounters an
+ // overflow during pickle size calculation, it sets |pickle_size| to the
+ // maximum size_t value and returns true.
+ static bool PeekNext(size_t header_size,
+ const char* range_start,
+ const char* range_end,
+ size_t* pickle_size);
+
// The allocation granularity of the payload.
static const int kPayloadUnit;
@@ -306,7 +308,10 @@ class BASE_EXPORT Pickle {
}
inline void WriteBytesCommon(const void* data, size_t length);
+ FRIEND_TEST_ALL_PREFIXES(PickleTest, DeepCopyResize);
FRIEND_TEST_ALL_PREFIXES(PickleTest, Resize);
+ FRIEND_TEST_ALL_PREFIXES(PickleTest, PeekNext);
+ FRIEND_TEST_ALL_PREFIXES(PickleTest, PeekNextOverflow);
FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNext);
FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNextWithIncompleteHeader);
FRIEND_TEST_ALL_PREFIXES(PickleTest, FindNextOverflow);
diff --git a/chromium/base/pickle_unittest.cc b/chromium/base/pickle_unittest.cc
index b0a8f21f89f..f58e7eceaf4 100644
--- a/chromium/base/pickle_unittest.cc
+++ b/chromium/base/pickle_unittest.cc
@@ -233,6 +233,88 @@ TEST(PickleTest, BadLenStr16) {
EXPECT_FALSE(iter.ReadString16(&outstr));
}
+TEST(PickleTest, PeekNext) {
+ struct CustomHeader : base::Pickle::Header {
+ int cookies[10];
+ };
+
+ Pickle pickle(sizeof(CustomHeader));
+
+ EXPECT_TRUE(pickle.WriteString("Goooooooooooogle"));
+
+ const char* pickle_data = static_cast<const char*>(pickle.data());
+
+ size_t pickle_size;
+
+ // Data range doesn't contain header
+ EXPECT_FALSE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader) - 1,
+ &pickle_size));
+
+ // Data range contains header
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, pickle.size());
+
+ // Data range contains header and some other data
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader) + 1,
+ &pickle_size));
+ EXPECT_EQ(pickle_size, pickle.size());
+
+ // Data range contains full pickle
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + pickle.size(),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, pickle.size());
+}
+
+TEST(PickleTest, PeekNextOverflow) {
+ struct CustomHeader : base::Pickle::Header {
+ int cookies[10];
+ };
+
+ CustomHeader header;
+
+ // Check if we can wrap around at all
+ if (sizeof(size_t) > sizeof(header.payload_size))
+ return;
+
+ const char* pickle_data = reinterpret_cast<const char*>(&header);
+
+ size_t pickle_size;
+
+ // Wrapping around is detected and reported as maximum size_t value
+ header.payload_size = static_cast<uint32_t>(
+ 1 - static_cast<int32_t>(sizeof(CustomHeader)));
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, std::numeric_limits<size_t>::max());
+
+ // Ridiculous pickle sizes are fine (callers are supposed to
+ // verify them)
+ header.payload_size =
+ std::numeric_limits<uint32_t>::max() / 2 - sizeof(CustomHeader);
+ EXPECT_TRUE(Pickle::PeekNext(
+ sizeof(CustomHeader),
+ pickle_data,
+ pickle_data + sizeof(CustomHeader),
+ &pickle_size));
+ EXPECT_EQ(pickle_size, std::numeric_limits<uint32_t>::max() / 2);
+}
+
TEST(PickleTest, FindNext) {
Pickle pickle;
EXPECT_TRUE(pickle.WriteInt(1));
@@ -428,4 +510,18 @@ TEST(PickleTest, ReadBytes) {
EXPECT_EQ(data, outdata);
}
+// Checks that when a pickle is deep-copied, the result is not larger than
+// needed.
+TEST(PickleTest, DeepCopyResize) {
+ Pickle pickle;
+ while (pickle.capacity_after_header() != pickle.payload_size())
+ pickle.WriteBool(true);
+
+ // Make a deep copy.
+ Pickle pickle2(pickle);
+
+ // Check that there isn't any extraneous capacity.
+ EXPECT_EQ(pickle.capacity_after_header(), pickle2.capacity_after_header());
+}
+
} // namespace base
diff --git a/chromium/base/power_monitor/power_monitor_device_source.h b/chromium/base/power_monitor/power_monitor_device_source.h
index 29f17c2a7d9..fa0b039bfb4 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.h
+++ b/chromium/base/power_monitor/power_monitor_device_source.h
@@ -102,7 +102,7 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
#endif
#if defined(ENABLE_BATTERY_MONITORING)
- base::OneShotTimer<PowerMonitorDeviceSource> delayed_battery_check_;
+ base::OneShotTimer delayed_battery_check_;
#endif
#if defined(OS_WIN)
diff --git a/chromium/base/power_monitor/power_monitor_device_source_android.cc b/chromium/base/power_monitor/power_monitor_device_source_android.cc
index 4d9eb527800..9671c30761e 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_android.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source_android.cc
@@ -19,15 +19,15 @@ void ProcessPowerEventHelper(PowerMonitorSource::PowerEvent event) {
namespace android {
// Native implementation of PowerMonitor.java.
-void OnBatteryChargingChanged(JNIEnv* env, jclass clazz) {
+void OnBatteryChargingChanged(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
ProcessPowerEventHelper(PowerMonitorSource::POWER_STATE_EVENT);
}
-void OnMainActivityResumed(JNIEnv* env, jclass clazz) {
+void OnMainActivityResumed(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
ProcessPowerEventHelper(PowerMonitorSource::RESUME_EVENT);
}
-void OnMainActivitySuspended(JNIEnv* env, jclass clazz) {
+void OnMainActivitySuspended(JNIEnv* env, const JavaParamRef<jclass>& clazz) {
ProcessPowerEventHelper(PowerMonitorSource::SUSPEND_EVENT);
}
diff --git a/chromium/base/power_monitor/power_monitor_device_source_win.cc b/chromium/base/power_monitor/power_monitor_device_source_win.cc
index 69bc6aa2cdc..b8b16e1d344 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_win.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source_win.cc
@@ -5,7 +5,6 @@
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_monitor_device_source.h"
#include "base/power_monitor/power_monitor_source.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/win/wrapped_window_proc.h"
namespace base {
diff --git a/chromium/base/prefs/OWNERS b/chromium/base/prefs/OWNERS
index 97ab6952245..2d870381cb7 100644
--- a/chromium/base/prefs/OWNERS
+++ b/chromium/base/prefs/OWNERS
@@ -1,5 +1,4 @@
battre@chromium.org
bauerb@chromium.org
gab@chromium.org
-mnissler@chromium.org
pam@chromium.org
diff --git a/chromium/base/prefs/json_pref_store.cc b/chromium/base/prefs/json_pref_store.cc
index 354fd948681..87943d168ed 100644
--- a/chromium/base/prefs/json_pref_store.cc
+++ b/chromium/base/prefs/json_pref_store.cc
@@ -60,16 +60,12 @@ PersistentPrefStore::PrefReadError HandleReadErrors(
switch (error_code) {
case JSONFileValueDeserializer::JSON_ACCESS_DENIED:
return PersistentPrefStore::PREF_READ_ERROR_ACCESS_DENIED;
- break;
case JSONFileValueDeserializer::JSON_CANNOT_READ_FILE:
return PersistentPrefStore::PREF_READ_ERROR_FILE_OTHER;
- break;
case JSONFileValueDeserializer::JSON_FILE_LOCKED:
return PersistentPrefStore::PREF_READ_ERROR_FILE_LOCKED;
- break;
case JSONFileValueDeserializer::JSON_NO_SUCH_FILE:
return PersistentPrefStore::PREF_READ_ERROR_NO_FILE;
- break;
default:
// JSON errors indicate file corruption of some sort.
// Since the file is corrupt, move it to the side and continue with
@@ -87,9 +83,9 @@ PersistentPrefStore::PrefReadError HandleReadErrors(
return bad_existed ? PersistentPrefStore::PREF_READ_ERROR_JSON_REPEAT
: PersistentPrefStore::PREF_READ_ERROR_JSON_PARSE;
}
- } else if (!value->IsType(base::Value::TYPE_DICTIONARY)) {
- return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
}
+ if (!value->IsType(base::Value::TYPE_DICTIONARY))
+ return PersistentPrefStore::PREF_READ_ERROR_JSON_TYPE;
return PersistentPrefStore::PREF_READ_ERROR_NONE;
}
@@ -147,26 +143,26 @@ scoped_refptr<base::SequencedTaskRunner> JsonPrefStore::GetTaskRunnerForFile(
}
JsonPrefStore::JsonPrefStore(
- const base::FilePath& filename,
+ const base::FilePath& pref_filename,
const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
scoped_ptr<PrefFilter> pref_filter)
- : JsonPrefStore(filename,
+ : JsonPrefStore(pref_filename,
base::FilePath(),
sequenced_task_runner,
pref_filter.Pass()) {
}
JsonPrefStore::JsonPrefStore(
- const base::FilePath& filename,
- const base::FilePath& alternate_filename,
+ const base::FilePath& pref_filename,
+ const base::FilePath& pref_alternate_filename,
const scoped_refptr<base::SequencedTaskRunner>& sequenced_task_runner,
scoped_ptr<PrefFilter> pref_filter)
- : path_(filename),
- alternate_path_(alternate_filename),
+ : path_(pref_filename),
+ alternate_path_(pref_alternate_filename),
sequenced_task_runner_(sequenced_task_runner),
prefs_(new base::DictionaryValue()),
read_only_(false),
- writer_(filename, sequenced_task_runner),
+ writer_(pref_filename, sequenced_task_runner),
pref_filter_(pref_filter.Pass()),
initialized_(false),
filtering_in_progress_(false),
@@ -180,7 +176,7 @@ bool JsonPrefStore::GetValue(const std::string& key,
const base::Value** result) const {
DCHECK(CalledOnValidThread());
- base::Value* tmp = NULL;
+ base::Value* tmp = nullptr;
if (!prefs_->Get(key, &tmp))
return false;
@@ -226,7 +222,7 @@ void JsonPrefStore::SetValue(const std::string& key,
DCHECK(CalledOnValidThread());
DCHECK(value);
- base::Value* old_value = NULL;
+ base::Value* old_value = nullptr;
prefs_->Get(key, &old_value);
if (!old_value || !value->Equals(old_value)) {
prefs_->Set(key, value.Pass());
@@ -240,7 +236,7 @@ void JsonPrefStore::SetValueSilently(const std::string& key,
DCHECK(CalledOnValidThread());
DCHECK(value);
- base::Value* old_value = NULL;
+ base::Value* old_value = nullptr;
prefs_->Get(key, &old_value);
if (!old_value || !value->Equals(old_value)) {
prefs_->Set(key, value.Pass());
@@ -251,14 +247,14 @@ void JsonPrefStore::SetValueSilently(const std::string& key,
void JsonPrefStore::RemoveValue(const std::string& key, uint32 flags) {
DCHECK(CalledOnValidThread());
- if (prefs_->RemovePath(key, NULL))
+ if (prefs_->RemovePath(key, nullptr))
ReportValueChanged(key, flags);
}
void JsonPrefStore::RemoveValueSilently(const std::string& key, uint32 flags) {
DCHECK(CalledOnValidThread());
- prefs_->RemovePath(key, NULL);
+ prefs_->RemovePath(key, nullptr);
ScheduleWrite(flags);
}
@@ -368,11 +364,6 @@ void JsonPrefStore::OnFileRead(scoped_ptr<ReadResult> read_result) {
// operation itself.
NOTREACHED();
break;
- case PREF_READ_ERROR_LEVELDB_IO:
- case PREF_READ_ERROR_LEVELDB_CORRUPTION_READ_ONLY:
- case PREF_READ_ERROR_LEVELDB_CORRUPTION:
- // These are specific to LevelDBPrefStore.
- NOTREACHED();
case PREF_READ_ERROR_MAX_ENUM:
NOTREACHED();
break;
diff --git a/chromium/base/prefs/json_pref_store.h b/chromium/base/prefs/json_pref_store.h
index 0be7702481e..d9dcdbdc6cd 100644
--- a/chromium/base/prefs/json_pref_store.h
+++ b/chromium/base/prefs/json_pref_store.h
@@ -15,7 +15,6 @@
#include "base/files/important_file_writer.h"
#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
-#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "base/prefs/base_prefs_export.h"
diff --git a/chromium/base/prefs/persistent_pref_store.h b/chromium/base/prefs/persistent_pref_store.h
index ad8a0a3a66e..89c7a71961c 100644
--- a/chromium/base/prefs/persistent_pref_store.h
+++ b/chromium/base/prefs/persistent_pref_store.h
@@ -33,9 +33,6 @@ class BASE_PREFS_EXPORT PersistentPrefStore : public WriteablePrefStore {
// Indicates that ReadPrefs() couldn't complete synchronously and is waiting
// for an asynchronous task to complete first.
PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE = 10,
- PREF_READ_ERROR_LEVELDB_IO = 11,
- PREF_READ_ERROR_LEVELDB_CORRUPTION_READ_ONLY = 12,
- PREF_READ_ERROR_LEVELDB_CORRUPTION = 13,
PREF_READ_ERROR_MAX_ENUM
};
diff --git a/chromium/base/prefs/pref_value_store.h b/chromium/base/prefs/pref_value_store.h
index 51601156604..5b832da5007 100644
--- a/chromium/base/prefs/pref_value_store.h
+++ b/chromium/base/prefs/pref_value_store.h
@@ -11,7 +11,6 @@
#include "base/basictypes.h"
#include "base/callback.h"
-#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
#include "base/prefs/base_prefs_export.h"
#include "base/prefs/pref_store.h"
@@ -179,13 +178,6 @@ class BASE_PREFS_EXPORT PrefValueStore {
typedef std::map<std::string, base::Value::Type> PrefTypeMap;
- friend class PrefValueStorePolicyRefreshTest;
- FRIEND_TEST_ALL_PREFIXES(PrefValueStorePolicyRefreshTest, TestPolicyRefresh);
- FRIEND_TEST_ALL_PREFIXES(PrefValueStorePolicyRefreshTest,
- TestRefreshPolicyPrefsCompletion);
- FRIEND_TEST_ALL_PREFIXES(PrefValueStorePolicyRefreshTest,
- TestConcurrentPolicyRefresh);
-
// Returns true if the preference with the given name has a value in the
// given PrefStoreType, of the same value type as the preference was
// registered with.
diff --git a/chromium/base/process/BUILD.gn b/chromium/base/process/BUILD.gn
index 3978b2e58cf..eabbde538f1 100644
--- a/chromium/base/process/BUILD.gn
+++ b/chromium/base/process/BUILD.gn
@@ -22,6 +22,7 @@ source_set("process") {
"memory_linux.cc",
"memory_mac.mm",
"memory_win.cc",
+ "port_provider_mac.h",
"process.h",
"process_handle_freebsd.cc",
"process_handle_linux.cc",
@@ -41,7 +42,6 @@ source_set("process") {
"process_iterator_openbsd.cc",
"process_iterator_win.cc",
"process_linux.cc",
- "process_mac.cc",
"process_metrics.cc",
"process_metrics.h",
"process_metrics_freebsd.cc",
@@ -96,7 +96,10 @@ source_set("process") {
}
if (is_ios) {
- sources += [ "process_metrics.cc" ]
+ sources += [
+ "memory_stubs.cc",
+ "process_metrics.cc",
+ ]
}
configs += [ "//base:base_implementation" ]
diff --git a/chromium/base/process/internal_linux.cc b/chromium/base/process/internal_linux.cc
index 4f3fcaccbfb..e6c2119c141 100644
--- a/chromium/base/process/internal_linux.cc
+++ b/chromium/base/process/internal_linux.cc
@@ -97,8 +97,9 @@ bool ParseProcStats(const std::string& stats_data,
close_parens_idx - (open_parens_idx + 1)));
// Split the rest.
- std::vector<std::string> other_stats;
- SplitString(stats_data.substr(close_parens_idx + 2), ' ', &other_stats);
+ std::vector<std::string> other_stats = SplitString(
+ stats_data.substr(close_parens_idx + 2), " ",
+ base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
for (size_t i = 0; i < other_stats.size(); ++i)
proc_stats->push_back(other_stats[i]);
return true;
diff --git a/chromium/base/process/kill.h b/chromium/base/process/kill.h
index dbd32e17058..bb4103fb643 100644
--- a/chromium/base/process/kill.h
+++ b/chromium/base/process/kill.h
@@ -37,6 +37,7 @@ enum TerminationStatus {
// a hint.
TERMINATION_STATUS_OOM_PROTECTED, // child was protected from oom kill
#endif
+ TERMINATION_STATUS_LAUNCH_FAILED, // child process never launched
TERMINATION_STATUS_MAX_ENUM
};
diff --git a/chromium/base/process/kill_win.cc b/chromium/base/process/kill_win.cc
index 0da3a26ae4f..7cbf9489051 100644
--- a/chromium/base/process/kill_win.cc
+++ b/chromium/base/process/kill_win.cc
@@ -57,7 +57,7 @@ class TimerExpiredTask : public win::ObjectWatcher::Delegate {
};
TimerExpiredTask::TimerExpiredTask(Process process) : process_(process.Pass()) {
- watcher_.StartWatching(process_.Handle(), this);
+ watcher_.StartWatchingOnce(process_.Handle(), this);
}
TimerExpiredTask::~TimerExpiredTask() {
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index 0e42cd006ff..42b8a7670d9 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -164,16 +164,6 @@ struct BASE_EXPORT LaunchOptions {
// process' controlling terminal.
int ctrl_terminal_fd;
#endif // defined(OS_CHROMEOS)
-
-#if defined(OS_MACOSX)
- // If this name is non-empty, the new child, after fork() but before exec(),
- // will look up this server name in the bootstrap namespace. The resulting
- // service port will be replaced as the bootstrap port in the child. Because
- // the process's IPC space is cleared on exec(), any rights to the old
- // bootstrap port will not be transferred to the new process.
- std::string replacement_bootstrap_name;
-#endif
-
#endif // !defined(OS_WIN)
};
@@ -236,7 +226,7 @@ BASE_EXPORT bool SetJobObjectLimitFlags(HANDLE job_object, DWORD limit_flags);
// Output multi-process printf, cout, cerr, etc to the cmd.exe console that ran
// chrome. This is not thread-safe: only call from main thread.
-BASE_EXPORT void RouteStdioToConsole();
+BASE_EXPORT void RouteStdioToConsole(bool create_console_if_not_found);
#endif // defined(OS_WIN)
// Executes the application specified by |cl| and wait for it to exit. Stores
@@ -245,6 +235,10 @@ BASE_EXPORT void RouteStdioToConsole();
// indicating success).
BASE_EXPORT bool GetAppOutput(const CommandLine& cl, std::string* output);
+// Like GetAppOutput, but also includes stderr.
+BASE_EXPORT bool GetAppOutputAndError(const CommandLine& cl,
+ std::string* output);
+
#if defined(OS_WIN)
// A Windows-specific version of GetAppOutput that takes a command line string
// instead of a CommandLine object. Useful for situations where you need to
@@ -286,11 +280,6 @@ BASE_EXPORT void RaiseProcessToHighPriority();
// in the child after forking will restore the standard exception handler.
// See http://crbug.com/20371/ for more details.
void RestoreDefaultExceptionHandler();
-
-// Look up the bootstrap server named |replacement_bootstrap_name| via the
-// current |bootstrap_port|. Then replace the task's bootstrap port with the
-// received right.
-void ReplaceBootstrapPort(const std::string& replacement_bootstrap_name);
#endif // defined(OS_MACOSX)
// Creates a LaunchOptions object suitable for launching processes in a test
diff --git a/chromium/base/process/launch_mac.cc b/chromium/base/process/launch_mac.cc
index ce02475541e..5895eae4351 100644
--- a/chromium/base/process/launch_mac.cc
+++ b/chromium/base/process/launch_mac.cc
@@ -28,21 +28,4 @@ void RestoreDefaultExceptionHandler() {
EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
-void ReplaceBootstrapPort(const std::string& new_bootstrap_name) {
- // This function is called between fork() and exec(), so it should take care
- // to run properly in that situation.
-
- mach_port_t port = MACH_PORT_NULL;
- kern_return_t kr = bootstrap_look_up(bootstrap_port,
- new_bootstrap_name.c_str(), &port);
- if (kr != KERN_SUCCESS) {
- RAW_LOG(FATAL, "Failed to look up replacement bootstrap port.");
- }
-
- kr = task_set_bootstrap_port(mach_task_self(), port);
- if (kr != KERN_SUCCESS) {
- RAW_LOG(FATAL, "Failed to replace bootstrap port.");
- }
-}
-
} // namespace base
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index 99d8e3aa64d..f076733d783 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -22,7 +22,6 @@
#include <limits>
#include <set>
-#include "base/allocator/type_profiler_control.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/debugger.h"
@@ -392,11 +391,6 @@ Process LaunchProcess(const std::vector<std::string>& argv,
}
}
- // Stop type-profiler.
- // The profiler should be stopped between fork and exec since it inserts
- // locks at new/delete expressions. See http://crbug.com/36678.
- base::type_profiler::Controller::Stop();
-
if (options.maximize_rlimits) {
// Some resource limits need to be maximal in this child.
for (size_t i = 0; i < options.maximize_rlimits->size(); ++i) {
@@ -415,8 +409,6 @@ Process LaunchProcess(const std::vector<std::string>& argv,
#if defined(OS_MACOSX)
RestoreDefaultExceptionHandler();
- if (!options.replacement_bootstrap_name.empty())
- ReplaceBootstrapPort(options.replacement_bootstrap_name);
#endif // defined(OS_MACOSX)
ResetChildSignalHandlersToDefaults();
@@ -530,7 +522,8 @@ enum GetAppOutputInternalResult {
// path for the application; in that case, |envp| must be null, and it will use
// the current environment. If |do_search_path| is false, |argv[0]| should fully
// specify the path of the application, and |envp| will be used as the
-// environment. Redirects stderr to /dev/null.
+// environment. If |include_stderr| is true, includes stderr; otherwise redirects
+// it to /dev/null.
// If we successfully start the application and get all requested output, we
// return GOT_MAX_OUTPUT, or if there is a problem starting or exiting
// the application we return RUN_FAILURE. Otherwise we return EXECUTE_SUCCESS.
@@ -543,6 +536,7 @@ enum GetAppOutputInternalResult {
static GetAppOutputInternalResult GetAppOutputInternal(
const std::vector<std::string>& argv,
char* const envp[],
+ bool include_stderr,
std::string* output,
size_t max_output,
bool do_search_path,
@@ -591,13 +585,10 @@ static GetAppOutputInternalResult GetAppOutputInternal(
if (dev_null < 0)
_exit(127);
- // Stop type-profiler.
- // The profiler should be stopped between fork and exec since it inserts
- // locks at new/delete expressions. See http://crbug.com/36678.
- base::type_profiler::Controller::Stop();
-
fd_shuffle1.push_back(InjectionArc(pipe_fd[1], STDOUT_FILENO, true));
- fd_shuffle1.push_back(InjectionArc(dev_null, STDERR_FILENO, true));
+ fd_shuffle1.push_back(InjectionArc(
+ include_stderr ? pipe_fd[1] : dev_null,
+ STDERR_FILENO, true));
fd_shuffle1.push_back(InjectionArc(dev_null, STDIN_FILENO, true));
// Adding another element here? Remeber to increase the argument to
// reserve(), above.
@@ -666,11 +657,20 @@ bool GetAppOutput(const std::vector<std::string>& argv, std::string* output) {
// Run |execve()| with the current environment and store "unlimited" data.
int exit_code;
GetAppOutputInternalResult result = GetAppOutputInternal(
- argv, NULL, output, std::numeric_limits<std::size_t>::max(), true,
+ argv, NULL, false, output, std::numeric_limits<std::size_t>::max(), true,
&exit_code);
return result == EXECUTE_SUCCESS && exit_code == EXIT_SUCCESS;
}
+bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
+ // Run |execve()| with the current environment and store "unlimited" data.
+ int exit_code;
+ GetAppOutputInternalResult result = GetAppOutputInternal(
+ cl.argv(), NULL, true, output, std::numeric_limits<std::size_t>::max(),
+ true, &exit_code);
+ return result == EXECUTE_SUCCESS && exit_code == EXIT_SUCCESS;
+}
+
// TODO(viettrungluu): Conceivably, we should have a timeout as well, so we
// don't hang if what we're calling hangs.
bool GetAppOutputRestricted(const CommandLine& cl,
@@ -679,7 +679,7 @@ bool GetAppOutputRestricted(const CommandLine& cl,
char* const empty_environ = NULL;
int exit_code;
GetAppOutputInternalResult result = GetAppOutputInternal(
- cl.argv(), &empty_environ, output, max_output, false, &exit_code);
+ cl.argv(), &empty_environ, false, output, max_output, false, &exit_code);
return result == GOT_MAX_OUTPUT || (result == EXECUTE_SUCCESS &&
exit_code == EXIT_SUCCESS);
}
@@ -689,8 +689,8 @@ bool GetAppOutputWithExitCode(const CommandLine& cl,
int* exit_code) {
// Run |execve()| with the current environment and store "unlimited" data.
GetAppOutputInternalResult result = GetAppOutputInternal(
- cl.argv(), NULL, output, std::numeric_limits<std::size_t>::max(), true,
- exit_code);
+ cl.argv(), NULL, false, output, std::numeric_limits<std::size_t>::max(),
+ true, exit_code);
return result == EXECUTE_SUCCESS;
}
diff --git a/chromium/base/process/launch_win.cc b/chromium/base/process/launch_win.cc
index fa59f1ae907..54b06675100 100644
--- a/chromium/base/process/launch_win.cc
+++ b/chromium/base/process/launch_win.cc
@@ -46,9 +46,91 @@ namespace {
// process goes away.
const DWORD kProcessKilledExitCode = 1;
+bool GetAppOutputInternal(const StringPiece16& cl,
+ bool include_stderr,
+ std::string* output) {
+ HANDLE out_read = NULL;
+ HANDLE out_write = NULL;
+
+ SECURITY_ATTRIBUTES sa_attr;
+ // Set the bInheritHandle flag so pipe handles are inherited.
+ sa_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
+ sa_attr.bInheritHandle = TRUE;
+ sa_attr.lpSecurityDescriptor = NULL;
+
+ // Create the pipe for the child process's STDOUT.
+ if (!CreatePipe(&out_read, &out_write, &sa_attr, 0)) {
+ NOTREACHED() << "Failed to create pipe";
+ return false;
+ }
+
+ // Ensure we don't leak the handles.
+ win::ScopedHandle scoped_out_read(out_read);
+ win::ScopedHandle scoped_out_write(out_write);
+
+ // Ensure the read handles to the pipes are not inherited.
+ if (!SetHandleInformation(out_read, HANDLE_FLAG_INHERIT, 0)) {
+    NOTREACHED() << "Failed to disable pipe inheritance";
+ return false;
+ }
+
+ FilePath::StringType writable_command_line_string;
+ writable_command_line_string.assign(cl.data(), cl.size());
+
+ STARTUPINFO start_info = {};
+
+ start_info.cb = sizeof(STARTUPINFO);
+ start_info.hStdOutput = out_write;
+ // Keep the normal stdin.
+ start_info.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
+ if (include_stderr) {
+ start_info.hStdError = out_write;
+ } else {
+ start_info.hStdError = GetStdHandle(STD_ERROR_HANDLE);
+ }
+ start_info.dwFlags |= STARTF_USESTDHANDLES;
+
+ // Create the child process.
+ PROCESS_INFORMATION temp_process_info = {};
+ if (!CreateProcess(NULL,
+ &writable_command_line_string[0],
+ NULL, NULL,
+ TRUE, // Handles are inherited.
+ 0, NULL, NULL, &start_info, &temp_process_info)) {
+ NOTREACHED() << "Failed to start process";
+ return false;
+ }
+ base::win::ScopedProcessInformation proc_info(temp_process_info);
+
+ // Close our writing end of pipe now. Otherwise later read would not be able
+ // to detect end of child's output.
+ scoped_out_write.Close();
+
+ // Read output from the child process's pipe for STDOUT
+ const int kBufferSize = 1024;
+ char buffer[kBufferSize];
+
+ for (;;) {
+ DWORD bytes_read = 0;
+ BOOL success = ReadFile(out_read, buffer, kBufferSize, &bytes_read, NULL);
+ if (!success || bytes_read == 0)
+ break;
+ output->append(buffer, bytes_read);
+ }
+
+ // Let's wait for the process to finish.
+ WaitForSingleObject(proc_info.process_handle(), INFINITE);
+
+ int exit_code;
+ base::TerminationStatus status = GetTerminationStatus(
+ proc_info.process_handle(), &exit_code);
+ return status != base::TERMINATION_STATUS_PROCESS_CRASHED &&
+ status != base::TERMINATION_STATUS_ABNORMAL_TERMINATION;
+}
+
} // namespace
-void RouteStdioToConsole() {
+void RouteStdioToConsole(bool create_console_if_not_found) {
// Don't change anything if stdout or stderr already point to a
// valid stream.
//
@@ -64,8 +146,22 @@ void RouteStdioToConsole() {
// stdout/stderr on startup (before the handle IDs can be reused).
// _fileno(stdout) will return -2 (_NO_CONSOLE_FILENO) if stdout was
// invalid.
- if (_fileno(stdout) >= 0 || _fileno(stderr) >= 0)
- return;
+ if (_fileno(stdout) >= 0 || _fileno(stderr) >= 0) {
+ // _fileno was broken for SUBSYSTEM:WINDOWS from VS2010 to VS2012/2013.
+ // http://crbug.com/358267. Confirm that the underlying HANDLE is valid
+ // before aborting.
+
+ // This causes NaCl tests to hang on XP for reasons unclear, perhaps due
+ // to not being able to inherit handles. Since it's only for debugging,
+ // and redirecting still works, punt for now.
+ if (base::win::GetVersion() < base::win::VERSION_VISTA)
+ return;
+
+ intptr_t stdout_handle = _get_osfhandle(_fileno(stdout));
+ intptr_t stderr_handle = _get_osfhandle(_fileno(stderr));
+ if (stdout_handle >= 0 || stderr_handle >= 0)
+ return;
+ }
if (!AttachConsole(ATTACH_PARENT_PROCESS)) {
unsigned int result = GetLastError();
@@ -76,10 +172,14 @@ void RouteStdioToConsole() {
// parent process is invalid (eg: crashed).
if (result == ERROR_GEN_FAILURE)
return;
- // Make a new console if attaching to parent fails with any other error.
- // It should be ERROR_INVALID_HANDLE at this point, which means the browser
- // was likely not started from a console.
- AllocConsole();
+ if (create_console_if_not_found) {
+ // Make a new console if attaching to parent fails with any other error.
+ // It should be ERROR_INVALID_HANDLE at this point, which means the
+ // browser was likely not started from a console.
+ AllocConsole();
+ } else {
+ return;
+ }
}
// Arbitrary byte count to use when buffering output lines. More
@@ -274,76 +374,12 @@ bool GetAppOutput(const CommandLine& cl, std::string* output) {
return GetAppOutput(cl.GetCommandLineString(), output);
}
-bool GetAppOutput(const StringPiece16& cl, std::string* output) {
- HANDLE out_read = NULL;
- HANDLE out_write = NULL;
-
- SECURITY_ATTRIBUTES sa_attr;
- // Set the bInheritHandle flag so pipe handles are inherited.
- sa_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
- sa_attr.bInheritHandle = TRUE;
- sa_attr.lpSecurityDescriptor = NULL;
-
- // Create the pipe for the child process's STDOUT.
- if (!CreatePipe(&out_read, &out_write, &sa_attr, 0)) {
- NOTREACHED() << "Failed to create pipe";
- return false;
- }
-
- // Ensure we don't leak the handles.
- win::ScopedHandle scoped_out_read(out_read);
- win::ScopedHandle scoped_out_write(out_write);
-
- // Ensure the read handle to the pipe for STDOUT is not inherited.
- if (!SetHandleInformation(out_read, HANDLE_FLAG_INHERIT, 0)) {
- NOTREACHED() << "Failed to disabled pipe inheritance";
- return false;
- }
-
- FilePath::StringType writable_command_line_string;
- writable_command_line_string.assign(cl.data(), cl.size());
-
- STARTUPINFO start_info = {};
-
- start_info.cb = sizeof(STARTUPINFO);
- start_info.hStdOutput = out_write;
- // Keep the normal stdin and stderr.
- start_info.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
- start_info.hStdError = GetStdHandle(STD_ERROR_HANDLE);
- start_info.dwFlags |= STARTF_USESTDHANDLES;
-
- // Create the child process.
- PROCESS_INFORMATION temp_process_info = {};
- if (!CreateProcess(NULL,
- &writable_command_line_string[0],
- NULL, NULL,
- TRUE, // Handles are inherited.
- 0, NULL, NULL, &start_info, &temp_process_info)) {
- NOTREACHED() << "Failed to start process";
- return false;
- }
- base::win::ScopedProcessInformation proc_info(temp_process_info);
-
- // Close our writing end of pipe now. Otherwise later read would not be able
- // to detect end of child's output.
- scoped_out_write.Close();
-
- // Read output from the child process's pipe for STDOUT
- const int kBufferSize = 1024;
- char buffer[kBufferSize];
-
- for (;;) {
- DWORD bytes_read = 0;
- BOOL success = ReadFile(out_read, buffer, kBufferSize, &bytes_read, NULL);
- if (!success || bytes_read == 0)
- break;
- output->append(buffer, bytes_read);
- }
-
- // Let's wait for the process to finish.
- WaitForSingleObject(proc_info.process_handle(), INFINITE);
+bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
+ return GetAppOutputInternal(cl.GetCommandLineString(), true, output);
+}
- return true;
+bool GetAppOutput(const StringPiece16& cl, std::string* output) {
+ return GetAppOutputInternal(cl, false, output);
}
void RaiseProcessToHighPriority() {
diff --git a/chromium/base/process/memory_mac.mm b/chromium/base/process/memory_mac.mm
index 4d719f8054e..17249a239b9 100644
--- a/chromium/base/process/memory_mac.mm
+++ b/chromium/base/process/memory_mac.mm
@@ -246,7 +246,7 @@ void oom_killer_new() {
// === Core Foundation CFAllocators ===
bool CanGetContextForCFAllocator() {
- return !base::mac::IsOSLaterThanYosemite_DontCallThis();
+ return !base::mac::IsOSLaterThanElCapitan_DontCallThis();
}
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
@@ -258,7 +258,8 @@ CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
} else if (base::mac::IsOSLion() ||
base::mac::IsOSMountainLion() ||
base::mac::IsOSMavericks() ||
- base::mac::IsOSYosemite()) {
+ base::mac::IsOSYosemite() ||
+ base::mac::IsOSElCapitan()) {
ChromeCFAllocatorLions* our_allocator =
const_cast<ChromeCFAllocatorLions*>(
reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
diff --git a/chromium/base/process/memory_unittest.cc b/chromium/base/process/memory_unittest.cc
index 0276b495915..98f049a3bf9 100644
--- a/chromium/base/process/memory_unittest.cc
+++ b/chromium/base/process/memory_unittest.cc
@@ -30,6 +30,16 @@
#endif
#if defined(OS_WIN)
+
+#if defined(_MSC_VER)
+// ssize_t needed for OutOfMemoryTest.
+#if defined(_WIN64)
+typedef __int64 ssize_t;
+#else
+typedef long ssize_t;
+#endif
+#endif
+
// HeapQueryInformation function pointer.
typedef BOOL (WINAPI* HeapQueryFn) \
(HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T, PSIZE_T);
@@ -130,8 +140,9 @@ TEST(ProcessMemoryTest, MacTerminateOnHeapCorruption) {
// OutOfMemoryTest cases. OpenBSD does not support these tests either.
// Don't test these on ASan/TSan/MSan configurations: only test the real
// allocator.
-// TODO(vandebo) make this work on Windows too.
-#if !defined(OS_ANDROID) && !defined(OS_OPENBSD) && !defined(OS_WIN) && \
+// Windows only supports these tests with the allocator shim in place.
+#if !defined(OS_ANDROID) && !defined(OS_OPENBSD) && \
+ !(defined(OS_WIN) && !defined(ALLOCATOR_SHIM)) && \
!defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#if defined(USE_TCMALLOC)
@@ -151,6 +162,9 @@ class OutOfMemoryTest : public testing::Test {
// Make test size as large as possible minus a few pages so
// that alignment or other rounding doesn't make it wrap.
test_size_(std::numeric_limits<std::size_t>::max() - 12 * 1024),
+ // A test size that is > 2Gb and will cause the allocators to reject
+ // the allocation due to security restrictions. See crbug.com/169327.
+ insecure_test_size_(std::numeric_limits<int>::max()),
signed_test_size_(std::numeric_limits<ssize_t>::max()) {
}
@@ -163,6 +177,7 @@ class OutOfMemoryTest : public testing::Test {
protected:
void* value_;
size_t test_size_;
+ size_t insecure_test_size_;
ssize_t signed_test_size_;
};
@@ -213,6 +228,47 @@ TEST_F(OutOfMemoryDeathTest, Calloc) {
}, kOomRegex);
}
+// OS X has no 2Gb allocation limit.
+// See https://crbug.com/169327.
+#if !defined(OS_MACOSX)
+TEST_F(OutOfMemoryDeathTest, SecurityNew) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = operator new(insecure_test_size_);
+ }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityNewArray) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = new char[insecure_test_size_];
+ }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityMalloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = malloc(insecure_test_size_);
+ }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityRealloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = realloc(NULL, insecure_test_size_);
+ }, kOomRegex);
+}
+
+TEST_F(OutOfMemoryDeathTest, SecurityCalloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = calloc(1024, insecure_test_size_ / 1024L);
+ }, kOomRegex);
+}
+#endif // !defined(OS_MACOSX)
+
+#if defined(OS_LINUX)
+
TEST_F(OutOfMemoryDeathTest, Valloc) {
ASSERT_DEATH({
SetUpInDeathAssert();
@@ -220,7 +276,12 @@ TEST_F(OutOfMemoryDeathTest, Valloc) {
}, kOomRegex);
}
-#if defined(OS_LINUX)
+TEST_F(OutOfMemoryDeathTest, SecurityValloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = valloc(insecure_test_size_);
+ }, kOomRegex);
+}
#if PVALLOC_AVAILABLE == 1
TEST_F(OutOfMemoryDeathTest, Pvalloc) {
@@ -229,6 +290,13 @@ TEST_F(OutOfMemoryDeathTest, Pvalloc) {
value_ = pvalloc(test_size_);
}, kOomRegex);
}
+
+TEST_F(OutOfMemoryDeathTest, SecurityPvalloc) {
+ ASSERT_DEATH({
+ SetUpInDeathAssert();
+ value_ = pvalloc(insecure_test_size_);
+ }, kOomRegex);
+}
#endif // PVALLOC_AVAILABLE == 1
TEST_F(OutOfMemoryDeathTest, Memalign) {
@@ -415,5 +483,5 @@ TEST_F(OutOfMemoryHandledTest, UncheckedCalloc) {
EXPECT_TRUE(value_ == NULL);
}
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-#endif // !defined(OS_ANDROID) && !defined(OS_OPENBSD) && !defined(OS_WIN) &&
- // !defined(ADDRESS_SANITIZER)
+#endif // !defined(OS_ANDROID) && !defined(OS_OPENBSD) && !(defined(OS_WIN) &&
+ // !defined(ALLOCATOR_SHIM)) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/process/memory_win.cc b/chromium/base/process/memory_win.cc
index fc57b48f1f7..b949b5d8854 100644
--- a/chromium/base/process/memory_win.cc
+++ b/chromium/base/process/memory_win.cc
@@ -10,6 +10,21 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+// malloc_unchecked is required to implement UncheckedMalloc properly.
+// It's provided by allocator_shim_win.cc but since that's not always present,
+// we provide a default that falls back to regular malloc.
+typedef void* (*MallocFn)(size_t);
+extern "C" void* (*const malloc_unchecked)(size_t);
+extern "C" void* (*const malloc_default)(size_t) = &malloc;
+
+#if defined(_M_IX86)
+#pragma comment(linker, "/alternatename:_malloc_unchecked=_malloc_default")
+#elif defined(_M_X64) || defined(_M_ARM)
+#pragma comment(linker, "/alternatename:malloc_unchecked=malloc_default")
+#else
+#error Unsupported platform
+#endif
+
namespace base {
namespace {
@@ -17,10 +32,12 @@ namespace {
#pragma warning(push)
#pragma warning(disable: 4702)
-int OnNoMemory(size_t) {
+int OnNoMemory(size_t size) {
// Kill the process. This is important for security since most of code
// does not check the result of memory allocation.
- __debugbreak();
+ LOG(FATAL) << "Out of memory, size = " << size;
+
+ // Safety check, make sure process exits here.
_exit(1);
return 0;
}
@@ -88,14 +105,9 @@ HMODULE GetModuleFromAddress(void* address) {
return instance;
}
-// TODO(b.kelemen): implement it with the required semantics. On Linux this is
-// implemented with a weak symbol that is overridden by tcmalloc. This is
-// neccessary because base cannot have a direct dependency on tcmalloc. Since
-// weak symbols are not supported on Windows this will involve some build time
-// magic, much like what is done for libcrt in order to override the allocation
-// functions.
+// Implemented using a weak symbol.
bool UncheckedMalloc(size_t size, void** result) {
- *result = malloc(size);
+ *result = malloc_unchecked(size);
return *result != NULL;
}
diff --git a/chromium/base/process/port_provider_mac.h b/chromium/base/process/port_provider_mac.h
new file mode 100644
index 00000000000..bdee4a8a293
--- /dev/null
+++ b/chromium/base/process/port_provider_mac.h
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_PORT_PROVIDER_MAC_H_
+#define BASE_PROCESS_PORT_PROVIDER_MAC_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/process/process_handle.h"
+
+namespace base {
+
+// Abstract base class that provides a mapping from ProcessHandle (pid_t) to the
+// Mach task port. This replicates task_for_pid(), which requires root
+// privileges.
+class BASE_EXPORT PortProvider {
+ public:
+ virtual ~PortProvider() {}
+
+ // Returns the mach task port for |process| if possible, or else
+ // |MACH_PORT_NULL|.
+ virtual mach_port_t TaskForPid(ProcessHandle process) const = 0;
+};
+
+} // namespace base
+
+#endif // BASE_PROCESS_PORT_PROVIDER_MAC_H_
diff --git a/chromium/base/process/process.h b/chromium/base/process/process.h
index 1559554c011..2a83cb088ac 100644
--- a/chromium/base/process/process.h
+++ b/chromium/base/process/process.h
@@ -111,24 +111,6 @@ class BASE_EXPORT Process {
// is not required.
bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code);
-#if defined(OS_MACOSX)
- // The Mac needs a Mach port in order to manipulate a process's priority,
- // and there's no good way to get that from base given the pid. These Mac
- // variants of the IsProcessBackgrounded and SetProcessBackgrounded API take
- // the Mach port for this reason. See crbug.com/460102
- //
- // A process is backgrounded when its priority is lower than normal.
- // Return true if the process with mach port |task_port| is backgrounded,
- // false otherwise.
- bool IsProcessBackgrounded(mach_port_t task_port) const;
-
- // Set the process with the specified mach port as backgrounded. If value is
- // true, the priority of the process will be lowered. If value is false, the
- // priority of the process will be made "normal" - equivalent to default
- // process priority. Returns true if the priority was changed, false
- // otherwise.
- bool SetProcessBackgrounded(mach_port_t task_port, bool value);
-#else
// A process is backgrounded when it's priority is lower than normal.
// Return true if this process is backgrounded, false otherwise.
bool IsProcessBackgrounded() const;
@@ -138,7 +120,7 @@ class BASE_EXPORT Process {
// will be made "normal" - equivalent to default process priority.
// Returns true if the priority was changed, false otherwise.
bool SetProcessBackgrounded(bool value);
-#endif // defined(OS_MACOSX)
+
// Returns an integer representing the priority of a process. The meaning
// of this value is OS dependent.
int GetPriority() const;
diff --git a/chromium/base/process/process_info_linux.cc b/chromium/base/process/process_info_linux.cc
index 9ec23135bfc..88ae5a137d6 100644
--- a/chromium/base/process/process_info_linux.cc
+++ b/chromium/base/process/process_info_linux.cc
@@ -12,7 +12,7 @@
namespace base {
-//static
+// static
const Time CurrentProcessInfo::CreationTime() {
ProcessHandle pid = GetCurrentProcessHandle();
int64 start_ticks =
diff --git a/chromium/base/process/process_info_mac.cc b/chromium/base/process/process_info_mac.cc
index b7cfdceda05..7680d373df3 100644
--- a/chromium/base/process/process_info_mac.cc
+++ b/chromium/base/process/process_info_mac.cc
@@ -14,7 +14,7 @@
namespace base {
-//static
+// static
const Time CurrentProcessInfo::CreationTime() {
int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
size_t len = 0;
diff --git a/chromium/base/process/process_iterator_mac.cc b/chromium/base/process/process_iterator_mac.cc
index 9b33a0a8e40..d9136f48b03 100644
--- a/chromium/base/process/process_iterator_mac.cc
+++ b/chromium/base/process/process_iterator_mac.cc
@@ -22,7 +22,8 @@ ProcessIterator::ProcessIterator(const ProcessFilter* filter)
// but trying to find where we were in a constantly changing list is basically
// impossible.
- int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };
+ int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID,
+ static_cast<int>(geteuid()) };
// Since more processes could start between when we get the size and when
// we get the list, we do a loop to keep trying until we get it.
diff --git a/chromium/base/process/process_linux.cc b/chromium/base/process/process_linux.cc
index 6e10dd2301f..958ffd6dafb 100644
--- a/chromium/base/process/process_linux.cc
+++ b/chromium/base/process/process_linux.cc
@@ -102,8 +102,8 @@ bool Process::IsProcessBackgrounded() const {
if (base::ReadFileToString(
base::FilePath(StringPrintf(kProcPath, process_)),
&proc)) {
- std::vector<std::string> proc_parts;
- base::SplitString(proc, ':', &proc_parts);
+ std::vector<std::string> proc_parts = base::SplitString(
+ proc, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
DCHECK_EQ(proc_parts.size(), 3u);
bool ret = proc_parts[2] == std::string(kBackground);
return ret;
diff --git a/chromium/base/process/process_mac.cc b/chromium/base/process/process_mac.cc
deleted file mode 100644
index 1913cc378e8..00000000000
--- a/chromium/base/process/process_mac.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/process/process.h"
-
-#include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
-
-#include <mach/mach.h>
-
-// The following was added to <mach/task_policy.h> after 10.8.
-// TODO(shrike): Remove the TASK_OVERRIDE_QOS_POLICY ifndef once builders
-// reach 10.9 or higher.
-#ifndef TASK_OVERRIDE_QOS_POLICY
-
-#define TASK_OVERRIDE_QOS_POLICY 9
-
-typedef struct task_category_policy task_category_policy_data_t;
-typedef struct task_category_policy* task_category_policy_t;
-
-enum task_latency_qos {
- LATENCY_QOS_TIER_UNSPECIFIED = 0x0,
- LATENCY_QOS_TIER_0 = ((0xFF << 16) | 1),
- LATENCY_QOS_TIER_1 = ((0xFF << 16) | 2),
- LATENCY_QOS_TIER_2 = ((0xFF << 16) | 3),
- LATENCY_QOS_TIER_3 = ((0xFF << 16) | 4),
- LATENCY_QOS_TIER_4 = ((0xFF << 16) | 5),
- LATENCY_QOS_TIER_5 = ((0xFF << 16) | 6)
-};
-typedef integer_t task_latency_qos_t;
-enum task_throughput_qos {
- THROUGHPUT_QOS_TIER_UNSPECIFIED = 0x0,
- THROUGHPUT_QOS_TIER_0 = ((0xFE << 16) | 1),
- THROUGHPUT_QOS_TIER_1 = ((0xFE << 16) | 2),
- THROUGHPUT_QOS_TIER_2 = ((0xFE << 16) | 3),
- THROUGHPUT_QOS_TIER_3 = ((0xFE << 16) | 4),
- THROUGHPUT_QOS_TIER_4 = ((0xFE << 16) | 5),
- THROUGHPUT_QOS_TIER_5 = ((0xFE << 16) | 6),
-};
-
-#define LATENCY_QOS_LAUNCH_DEFAULT_TIER LATENCY_QOS_TIER_3
-#define THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER THROUGHPUT_QOS_TIER_3
-
-typedef integer_t task_throughput_qos_t;
-
-struct task_qos_policy {
- task_latency_qos_t task_latency_qos_tier;
- task_throughput_qos_t task_throughput_qos_tier;
-};
-
-typedef struct task_qos_policy* task_qos_policy_t;
-#define TASK_QOS_POLICY_COUNT \
- ((mach_msg_type_number_t)(sizeof(struct task_qos_policy) / sizeof(integer_t)))
-
-#endif // TASK_OVERRIDE_QOS_POLICY
-
-namespace base {
-
-bool Process::CanBackgroundProcesses() {
- return true;
-}
-
-bool Process::IsProcessBackgrounded(mach_port_t task_port) const {
- // See SetProcessBackgrounded().
- DCHECK(IsValid());
- DCHECK_NE(task_port, TASK_NULL);
-
- task_category_policy_data_t category_policy;
- mach_msg_type_number_t task_info_count = TASK_CATEGORY_POLICY_COUNT;
- boolean_t get_default = FALSE;
-
- kern_return_t result =
- task_policy_get(task_port, TASK_CATEGORY_POLICY,
- reinterpret_cast<task_policy_t>(&category_policy),
- &task_info_count, &get_default);
- MACH_LOG_IF(ERROR, result != KERN_SUCCESS, result) <<
- "task_policy_get TASK_CATEGORY_POLICY";
-
- if (result == KERN_SUCCESS && get_default == FALSE) {
- return category_policy.role == TASK_BACKGROUND_APPLICATION;
- }
- return false;
-}
-
-bool Process::SetProcessBackgrounded(mach_port_t task_port, bool background) {
- DCHECK(IsValid());
- DCHECK_NE(task_port, TASK_NULL);
-
- if (!CanBackgroundProcesses()) {
- return false;
- } else if (IsProcessBackgrounded(task_port) == background) {
- return true;
- }
-
- task_category_policy category_policy;
- category_policy.role =
- background ? TASK_BACKGROUND_APPLICATION : TASK_FOREGROUND_APPLICATION;
- kern_return_t result =
- task_policy_set(task_port, TASK_CATEGORY_POLICY,
- reinterpret_cast<task_policy_t>(&category_policy),
- TASK_CATEGORY_POLICY_COUNT);
-
- if (result != KERN_SUCCESS) {
- MACH_LOG(ERROR, result) << "task_policy_set TASK_CATEGORY_POLICY";
- return false;
- } else if (!mac::IsOSMavericksOrLater()) {
- return true;
- }
-
- // Latency QoS regulates timer throttling/accuracy. Select default tier
- // on foreground because precise timer firing isn't needed.
- struct task_qos_policy qos_policy = {
- background ? LATENCY_QOS_TIER_5 : LATENCY_QOS_TIER_UNSPECIFIED,
- background ? THROUGHPUT_QOS_TIER_5 : THROUGHPUT_QOS_TIER_UNSPECIFIED
- };
- result = task_policy_set(task_port, TASK_OVERRIDE_QOS_POLICY,
- reinterpret_cast<task_policy_t>(&qos_policy),
- TASK_QOS_POLICY_COUNT);
- if (result != KERN_SUCCESS) {
- MACH_LOG(ERROR, result) << "task_policy_set TASK_OVERRIDE_QOS_POLICY";
- return false;
- }
-
- return true;
-}
-
-} // namespace base
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 8b4ec86693a..327483a42e7 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -19,6 +19,7 @@
#if defined(OS_MACOSX)
#include <mach/mach.h>
+#include "base/process/port_provider_mac.h"
#endif
namespace base {
@@ -101,16 +102,6 @@ class BASE_EXPORT ProcessMetrics {
#if !defined(OS_MACOSX) || defined(OS_IOS)
static ProcessMetrics* CreateProcessMetrics(ProcessHandle process);
#else
- class PortProvider {
- public:
- virtual ~PortProvider() {}
-
- // Should return the mach task for |process| if possible, or else
- // |MACH_PORT_NULL|. Only processes that this returns tasks for will have
- // metrics on OS X (except for the current process, which always gets
- // metrics).
- virtual mach_port_t TaskForPid(ProcessHandle process) const = 0;
- };
// The port provider needs to outlive the ProcessMetrics object returned by
// this function. If NULL is passed as provider, the returned object
@@ -242,23 +233,16 @@ BASE_EXPORT size_t GetMaxFds();
BASE_EXPORT void SetFdLimit(unsigned int max_descriptors);
#endif // defined(OS_POSIX)
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-// Parse the data found in /proc/<pid>/stat and return the sum of the
-// CPU-related ticks. Returns -1 on parse error.
-// Exposed for testing.
-BASE_EXPORT int ParseProcStatCPU(const std::string& input);
-
-// Get the number of threads of |process| as available in /proc/<pid>/stat.
-// This should be used with care as no synchronization with running threads is
-// done. This is mostly useful to guarantee being single-threaded.
-// Returns 0 on failure.
-BASE_EXPORT int GetNumberOfThreads(ProcessHandle process);
-
-// /proc/self/exe refers to the current executable.
-BASE_EXPORT extern const char kProcSelfExe[];
-
-// Data from /proc/meminfo about system-wide memory consumption.
-// Values are in KB.
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+// Data about system-wide memory consumption. Values are in KB. Available on
+// Windows, Mac, Linux, Android and Chrome OS.
+//
+// Total/free memory are available on all platforms that implement
+// GetSystemMemoryInfo(). Total/free swap memory are available on all platforms
+// except on Mac. Buffers/cached/active_anon/inactive_anon/active_file/
+// inactive_file/dirty/pswpin/pswpout/pgmajfault are available on
+// Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
struct BASE_EXPORT SystemMemoryInfoKB {
SystemMemoryInfoKB();
@@ -267,30 +251,62 @@ struct BASE_EXPORT SystemMemoryInfoKB {
int total;
int free;
+
+#if !defined(OS_MACOSX)
+ int swap_total;
+ int swap_free;
+#endif
+
+#if defined(OS_ANDROID) || defined(OS_LINUX)
int buffers;
int cached;
int active_anon;
int inactive_anon;
int active_file;
int inactive_file;
- int swap_total;
- int swap_free;
int dirty;
// vmstats data.
int pswpin;
int pswpout;
int pgmajfault;
+#endif // defined(OS_ANDROID) || defined(OS_LINUX)
-#ifdef OS_CHROMEOS
+#if defined(OS_CHROMEOS)
int shmem;
int slab;
// Gem data will be -1 if not supported.
int gem_objects;
long long gem_size;
-#endif
+#endif // defined(OS_CHROMEOS)
};
+// On Linux/Android/Chrome OS, system-wide memory consumption data is parsed
+// from /proc/meminfo and /proc/vmstat. On Windows/Mac, it is obtained using
+// system API calls.
+//
+// Fills in the provided |meminfo| structure. Returns true on success.
+// Exposed for memory debugging widget.
+BASE_EXPORT bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo);
+
+#endif // defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) ||
+ // defined(OS_ANDROID)
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+// Parse the data found in /proc/<pid>/stat and return the sum of the
+// CPU-related ticks. Returns -1 on parse error.
+// Exposed for testing.
+BASE_EXPORT int ParseProcStatCPU(const std::string& input);
+
+// Get the number of threads of |process| as available in /proc/<pid>/stat.
+// This should be used with care as no synchronization with running threads is
+// done. This is mostly useful to guarantee being single-threaded.
+// Returns 0 on failure.
+BASE_EXPORT int GetNumberOfThreads(ProcessHandle process);
+
+// /proc/self/exe refers to the current executable.
+BASE_EXPORT extern const char kProcSelfExe[];
+
// Parses a string containing the contents of /proc/meminfo
// returns true on success or false for a parsing error
BASE_EXPORT bool ParseProcMeminfo(const std::string& input,
@@ -301,12 +317,6 @@ BASE_EXPORT bool ParseProcMeminfo(const std::string& input,
BASE_EXPORT bool ParseProcVmstat(const std::string& input,
SystemMemoryInfoKB* meminfo);
-// Retrieves data from /proc/meminfo and /proc/vmstat
-// about system-wide memory consumption.
-// Fills in the provided |meminfo| structure. Returns true on success.
-// Exposed for memory debugging widget.
-BASE_EXPORT bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo);
-
// Data from /proc/diskstats about system-wide disk I/O.
struct BASE_EXPORT SystemDiskInfo {
SystemDiskInfo();
diff --git a/chromium/base/process/process_metrics_ios.cc b/chromium/base/process/process_metrics_ios.cc
index 07f2c8de1d4..135ef43c06c 100644
--- a/chromium/base/process/process_metrics_ios.cc
+++ b/chromium/base/process/process_metrics_ios.cc
@@ -23,6 +23,11 @@ bool GetTaskInfo(task_basic_info_64* task_info_data) {
} // namespace
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+ total = 0;
+ free = 0;
+}
+
ProcessMetrics::ProcessMetrics(ProcessHandle process) {}
ProcessMetrics::~ProcessMetrics() {}
@@ -82,4 +87,11 @@ size_t GetSystemCommitCharge() {
return 0;
}
+// Bytes committed by the system.
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+ // Unimplemented. Must enable unittest for IOS when this gets implemented.
+ NOTIMPLEMENTED();
+ return false;
+}
+
} // namespace base
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 47a79e51e5e..adca7c5ee23 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -67,8 +67,8 @@ size_t ReadProcStatusAndGetFieldAsSizeT(pid_t pid, const std::string& field) {
const std::string& key = pairs[i].first;
const std::string& value_str = pairs[i].second;
if (key == field) {
- std::vector<std::string> split_value_str;
- SplitString(value_str, ' ', &split_value_str);
+ std::vector<StringPiece> split_value_str = SplitStringPiece(
+ value_str, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
if (split_value_str.size() != 2 || split_value_str[1] != "kB") {
NOTREACHED();
return 0;
@@ -316,8 +316,9 @@ bool ProcessMetrics::GetWorkingSetKBytesTotmaps(WorkingSetKBytes *ws_usage)
return false;
}
- std::vector<std::string> totmaps_fields;
- SplitStringAlongWhitespace(totmaps_data, &totmaps_fields);
+ std::vector<std::string> totmaps_fields = SplitString(
+ totmaps_data, base::kWhitespaceASCII, base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
DCHECK_EQ("Pss:", totmaps_fields[kPssIndex-1]);
DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
@@ -368,8 +369,8 @@ bool ProcessMetrics::GetWorkingSetKBytesStatm(WorkingSetKBytes* ws_usage)
return false;
}
- std::vector<std::string> statm_vec;
- SplitString(statm, ' ', &statm_vec);
+ std::vector<StringPiece> statm_vec = SplitStringPiece(
+ statm, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
if (statm_vec.size() != 7)
return false; // Not the format we expect.
diff --git a/chromium/base/process/process_metrics_mac.cc b/chromium/base/process/process_metrics_mac.cc
index f84b435a109..a2ecd8e7185 100644
--- a/chromium/base/process/process_metrics_mac.cc
+++ b/chromium/base/process/process_metrics_mac.cc
@@ -77,6 +77,11 @@ bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
} // namespace
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+ total = 0;
+ free = 0;
+}
+
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to do these (and spinning
// up ps to fetch each stats seems dangerous to put in a base api for anyone to
@@ -86,7 +91,7 @@ bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
// static
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
ProcessHandle process,
- ProcessMetrics::PortProvider* port_provider) {
+ PortProvider* port_provider) {
return new ProcessMetrics(process, port_provider);
}
@@ -325,7 +330,7 @@ bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
}
ProcessMetrics::ProcessMetrics(ProcessHandle process,
- ProcessMetrics::PortProvider* port_provider)
+ PortProvider* port_provider)
: process_(process),
last_system_time_(0),
last_absolute_idle_wakeups_(0),
@@ -358,4 +363,32 @@ size_t GetSystemCommitCharge() {
return (data.active_count * PAGE_SIZE) / 1024;
}
+// On Mac, We only get total memory and free memory from the system.
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+ struct host_basic_info hostinfo;
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+ base::mac::ScopedMachSendRight host(mach_host_self());
+ int result = host_info(host, HOST_BASIC_INFO,
+ reinterpret_cast<host_info_t>(&hostinfo), &count);
+ if (result != KERN_SUCCESS)
+ return false;
+
+ DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+ meminfo->total = static_cast<int>(hostinfo.max_mem / 1024);
+
+ vm_statistics_data_t vm_info;
+ count = HOST_VM_INFO_COUNT;
+
+ if (host_statistics(host.get(), HOST_VM_INFO,
+ reinterpret_cast<host_info_t>(&vm_info),
+ &count) != KERN_SUCCESS) {
+ return false;
+ }
+
+ meminfo->free = static_cast<int>(
+ (vm_info.free_count - vm_info.speculative_count) * PAGE_SIZE / 1024);
+
+ return true;
+}
+
} // namespace base
diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc
index 76767b09a95..31479cefff6 100644
--- a/chromium/base/process/process_metrics_unittest.cc
+++ b/chromium/base/process/process_metrics_unittest.cc
@@ -270,7 +270,8 @@ TEST_F(SystemMetricsTest, ParseVmstat) {
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
-#if defined(OS_LINUX) || defined(OS_ANDROID)
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
+ defined(OS_LINUX) || defined(OS_ANDROID)
TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
base::SystemMemoryInfoKB info;
EXPECT_TRUE(base::GetSystemMemoryInfo(&info));
@@ -278,21 +279,25 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
// Ensure each field received a value.
EXPECT_GT(info.total, 0);
EXPECT_GT(info.free, 0);
+#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_GT(info.buffers, 0);
EXPECT_GT(info.cached, 0);
EXPECT_GT(info.active_anon, 0);
EXPECT_GT(info.inactive_anon, 0);
EXPECT_GT(info.active_file, 0);
EXPECT_GT(info.inactive_file, 0);
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
// All the values should be less than the total amount of memory.
EXPECT_LT(info.free, info.total);
+#if defined(OS_LINUX) || defined(OS_ANDROID)
EXPECT_LT(info.buffers, info.total);
EXPECT_LT(info.cached, info.total);
EXPECT_LT(info.active_anon, info.total);
EXPECT_LT(info.inactive_anon, info.total);
EXPECT_LT(info.active_file, info.total);
EXPECT_LT(info.inactive_file, info.total);
+#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_CHROMEOS)
// Chrome OS exposes shmem.
@@ -302,7 +307,8 @@ TEST(SystemMetrics2Test, GetSystemMemoryInfo) {
// and gem_size cannot be tested here.
#endif
}
-#endif // defined(OS_LINUX) || defined(OS_ANDROID)
+#endif // defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) ||
+ // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
TEST(ProcessMetricsTest, ParseProcStatCPU) {
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index 170f6dcaab8..c3b3e50ff61 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -6,15 +6,32 @@
#include <windows.h>
#include <psapi.h>
+#include <winternl.h>
#include "base/logging.h"
#include "base/sys_info.h"
namespace base {
+namespace {
// System pagesize. This value remains constant on x86/64 architectures.
const int PAGESIZE_KB = 4;
+typedef NTSTATUS(WINAPI* NTQUERYSYSTEMINFORMATION)(
+ SYSTEM_INFORMATION_CLASS SystemInformationClass,
+ PVOID SystemInformation,
+ ULONG SystemInformationLength,
+ PULONG ReturnLength);
+
+} // namespace
+
+SystemMemoryInfoKB::SystemMemoryInfoKB() {
+ total = 0;
+ free = 0;
+ swap_total = 0;
+ swap_free = 0;
+}
+
ProcessMetrics::~ProcessMetrics() { }
// static
@@ -285,4 +302,24 @@ size_t GetPageSize() {
return PAGESIZE_KB * 1024;
}
+// This function uses the following mapping between MEMORYSTATUSEX and
+// SystemMemoryInfoKB:
+// ullTotalPhys ==> total
+// ullAvailPhys ==> free
+// ullTotalPageFile ==> swap_total
+// ullAvailPageFile ==> swap_free
+bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
+ MEMORYSTATUSEX mem_status;
+ mem_status.dwLength = sizeof(mem_status);
+ if (!::GlobalMemoryStatusEx(&mem_status))
+ return false;
+
+ meminfo->total = mem_status.ullTotalPhys / 1024;
+ meminfo->free = mem_status.ullAvailPhys / 1024;
+ meminfo->swap_total = mem_status.ullTotalPageFile / 1024;
+ meminfo->swap_free = mem_status.ullAvailPageFile / 1024;
+
+ return true;
+}
+
} // namespace base
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index b6f22c1edcf..72e49faefca 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -255,12 +255,12 @@ Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
return Process(handle);
}
-#if !defined(OS_LINUX) && !defined(OS_MACOSX)
+#if !defined(OS_LINUX)
// static
bool Process::CanBackgroundProcesses() {
return false;
}
-#endif // !defined(OS_LINUX) && !defined(OS_MACOSX)
+#endif // !defined(OS_LINUX)
bool Process::IsValid() const {
return process_ != kNullProcessHandle;
@@ -356,7 +356,7 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
return WaitForExitWithTimeoutImpl(Handle(), exit_code, timeout);
}
-#if !defined(OS_LINUX) && !defined(OS_MACOSX)
+#if !defined(OS_LINUX)
bool Process::IsProcessBackgrounded() const {
// See SetProcessBackgrounded().
DCHECK(IsValid());
@@ -364,13 +364,13 @@ bool Process::IsProcessBackgrounded() const {
}
bool Process::SetProcessBackgrounded(bool value) {
- // Not implemented for POSIX systems other than Mac and Linux. With POSIX, if
- // we were to lower the process priority we wouldn't be able to raise it back
- // to its initial priority.
+ // Not implemented for POSIX systems other than Linux. With POSIX, if we were
+ // to lower the process priority we wouldn't be able to raise it back to its
+ // initial priority.
NOTIMPLEMENTED();
return false;
}
-#endif // !defined(OS_LINUX) && !defined(OS_MACOSX)
+#endif // !defined(OS_LINUX)
int Process::GetPriority() const {
DCHECK(IsValid());
diff --git a/chromium/base/process/process_unittest.cc b/chromium/base/process/process_unittest.cc
index e094c032f3b..9fdc2f1e028 100644
--- a/chromium/base/process/process_unittest.cc
+++ b/chromium/base/process/process_unittest.cc
@@ -11,10 +11,6 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
-#if defined(OS_MACOSX)
-#include <mach/mach.h>
-#endif // OS_MACOSX
-
namespace {
#if defined(OS_WIN)
@@ -174,26 +170,16 @@ TEST_F(ProcessTest, WaitForExitWithTimeout) {
TEST_F(ProcessTest, SetProcessBackgrounded) {
Process process(SpawnChild("SimpleChildProcess"));
int old_priority = process.GetPriority();
-#if defined(OS_MACOSX)
- // On the Mac, backgrounding a process requires a port to that process.
- // In the browser it's available through the MachBroker class, which is not
- // part of base. Additionally, there is an indefinite amount of time between
- // spawning a process and receiving its port. Because this test just checks
- // the ability to background/foreground a process, we can use the current
- // process's port instead.
- mach_port_t process_port = mach_task_self();
- EXPECT_TRUE(process.SetProcessBackgrounded(process_port, true));
- EXPECT_TRUE(process.IsProcessBackgrounded(process_port));
- EXPECT_TRUE(process.SetProcessBackgrounded(process_port, false));
- EXPECT_FALSE(process.IsProcessBackgrounded(process_port));
-#elif defined(OS_WIN)
+#if defined(OS_WIN)
EXPECT_TRUE(process.SetProcessBackgrounded(true));
EXPECT_TRUE(process.IsProcessBackgrounded());
EXPECT_TRUE(process.SetProcessBackgrounded(false));
EXPECT_FALSE(process.IsProcessBackgrounded());
#else
- process.SetProcessBackgrounded(true);
- process.SetProcessBackgrounded(false);
+ if (process.CanBackgroundProcesses()) {
+ process.SetProcessBackgrounded(true);
+ process.SetProcessBackgrounded(false);
+ }
#endif
int new_priority = process.GetPriority();
EXPECT_EQ(old_priority, new_priority);
@@ -204,13 +190,7 @@ TEST_F(ProcessTest, SetProcessBackgrounded) {
TEST_F(ProcessTest, SetProcessBackgroundedSelf) {
Process process = Process::Current();
int old_priority = process.GetPriority();
-#if defined(OS_MACOSX)
- mach_port_t process_port = mach_task_self();
- EXPECT_TRUE(process.SetProcessBackgrounded(process_port, true));
- EXPECT_TRUE(process.IsProcessBackgrounded(process_port));
- EXPECT_TRUE(process.SetProcessBackgrounded(process_port, false));
- EXPECT_FALSE(process.IsProcessBackgrounded(process_port));
-#elif defined(OS_WIN)
+#if defined(OS_WIN)
EXPECT_TRUE(process.SetProcessBackgrounded(true));
EXPECT_TRUE(process.IsProcessBackgrounded());
EXPECT_TRUE(process.SetProcessBackgrounded(false));
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 6c1a3f1d761..08144f2a18e 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -59,6 +59,9 @@
#include <malloc/malloc.h>
#include "base/mac/mac_util.h"
#endif
+#if defined(OS_ANDROID)
+#include "third_party/lss/linux_syscall_support.h"
+#endif
using base::FilePath;
@@ -75,7 +78,7 @@ const char kShellPath[] = "/system/bin/sh";
const char kPosixShell[] = "sh";
#else
const char kShellPath[] = "/bin/sh";
-const char kPosixShell[] = "bash";
+const char kPosixShell[] = "sh";
#endif
#endif // defined(OS_POSIX)
@@ -226,7 +229,19 @@ const char kSignalFileCrash[] = "CrashingChildProcess.die";
MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileCrash).c_str());
-#if defined(OS_POSIX)
+#if defined(OS_ANDROID)
+ // Android L+ expose signal and sigaction symbols that override the system
+ // ones. There is a bug in these functions where a request to set the handler
+ // to SIG_DFL is ignored. In that case, an infinite loop is entered as the
+ // signal is repeatedly sent to the crash dump signal handler.
+ // To work around this, directly call the system's sigaction.
+ struct kernel_sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sys_sigemptyset(&sa.sa_mask);
+ sa.sa_handler_ = SIG_DFL;
+ sa.sa_flags = SA_RESTART;
+ sys_rt_sigaction(SIGSEGV, &sa, NULL, sizeof(kernel_sigset_t));
+#elif defined(OS_POSIX)
// Have to disable to signal handler for segv so we can get a crash
// instead of an abnormal termination through the crash dump handler.
::signal(SIGSEGV, SIG_DFL);
diff --git a/chromium/base/process/process_win.cc b/chromium/base/process/process_win.cc
index 30cd9dc7372..818864fa528 100644
--- a/chromium/base/process/process_win.cc
+++ b/chromium/base/process/process_win.cc
@@ -6,10 +6,8 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "base/metrics/field_trial.h"
#include "base/numerics/safe_conversions.h"
#include "base/process/kill.h"
-#include "base/strings/string_util.h"
#include "base/win/windows_version.h"
namespace {
@@ -179,23 +177,7 @@ bool Process::SetProcessBackgrounded(bool value) {
priority = value ? PROCESS_MODE_BACKGROUND_BEGIN :
PROCESS_MODE_BACKGROUND_END;
} else {
- // Experiment (http://crbug.com/458594) with using IDLE_PRIORITY_CLASS as a
- // background priority for background renderers (this code path is
- // technically for more than just the renderers but they're the only use
- // case in practice and experimenting here direclty is thus easier -- plus
- // it doesn't really hurt as above we already state our intent of using
- // PROCESS_MODE_BACKGROUND_BEGIN if available which is essentially
- // IDLE_PRIORITY_CLASS plus lowered IO priority). Enabled by default in the
- // asbence of field trials to get coverage on the perf waterfall.
- DWORD background_priority = IDLE_PRIORITY_CLASS;
- base::FieldTrial* trial =
- base::FieldTrialList::Find("BackgroundRendererProcesses");
- if (trial && StartsWith(trial->group_name(), "AllowBelowNormalFromBrowser",
- CompareCase::SENSITIVE)) {
- background_priority = BELOW_NORMAL_PRIORITY_CLASS;
- }
-
- priority = value ? background_priority : NORMAL_PRIORITY_CLASS;
+ priority = value ? IDLE_PRIORITY_CLASS : NORMAL_PRIORITY_CLASS;
}
return (::SetPriorityClass(Handle(), priority) != 0);
diff --git a/chromium/base/profiler/stack_sampling_profiler_posix.cc b/chromium/base/profiler/native_stack_sampler_posix.cc
index bce37e10c3a..bce37e10c3a 100644
--- a/chromium/base/profiler/stack_sampling_profiler_posix.cc
+++ b/chromium/base/profiler/native_stack_sampler_posix.cc
diff --git a/chromium/base/profiler/stack_sampling_profiler_win.cc b/chromium/base/profiler/native_stack_sampler_win.cc
index 73b8e11ce41..bc935c0e588 100644
--- a/chromium/base/profiler/stack_sampling_profiler_win.cc
+++ b/chromium/base/profiler/native_stack_sampler_win.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/profiler/native_stack_sampler.h"
+#include "base/profiler/win32_stack_frame_unwinder.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
@@ -19,6 +20,8 @@
namespace base {
+// Stack recording functions --------------------------------------------------
+
namespace {
// Walks the stack represented by |context| from the current frame downwards,
@@ -26,41 +29,13 @@ namespace {
int RecordStack(CONTEXT* context,
int max_stack_size,
const void* instruction_pointers[],
- bool* last_frame_is_unknown_function) {
+ Win32StackFrameUnwinder* frame_unwinder) {
#ifdef _WIN64
- *last_frame_is_unknown_function = false;
-
int i = 0;
for (; (i < max_stack_size) && context->Rip; ++i) {
- // Try to look up unwind metadata for the current function.
- ULONG64 image_base;
- PRUNTIME_FUNCTION runtime_function =
- RtlLookupFunctionEntry(context->Rip, &image_base, nullptr);
-
instruction_pointers[i] = reinterpret_cast<const void*>(context->Rip);
-
- if (runtime_function) {
- KNONVOLATILE_CONTEXT_POINTERS nvcontext = {};
- void* handler_data;
- ULONG64 establisher_frame;
- RtlVirtualUnwind(0, image_base, context->Rip, runtime_function, context,
- &handler_data, &establisher_frame, &nvcontext);
- } else {
- // If we don't have a RUNTIME_FUNCTION, then in theory this should be a
- // leaf function whose frame contains only a return address, at
- // RSP. However, crash data also indicates that some third party libraries
- // do not provide RUNTIME_FUNCTION information for non-leaf functions. We
- // could manually unwind the stack in the former case, but attempting to
- // do so in the latter case would produce wrong results and likely crash,
- // so just bail out.
- //
- // Ad hoc runs with instrumentation show that ~5% of stack traces end with
- // a valid leaf function. To avoid selectively omitting these traces it
- // makes sense to ultimately try to distinguish these two cases and
- // selectively unwind the stack for legitimate leaf functions. For the
- // purposes of avoiding crashes though, just ignore them all for now.
- return i;
- }
+ if (!frame_unwinder->TryUnwind(context))
+ return i + 1;
}
return i;
#else
@@ -74,11 +49,8 @@ int RecordStack(CONTEXT* context,
// SuspendThreadAndRecordStack for why |addresses| and |module_handles| are
// arrays.
void FindModuleHandlesForAddresses(const void* const addresses[],
- HMODULE module_handles[], int stack_depth,
- bool last_frame_is_unknown_function) {
- const int module_frames =
- last_frame_is_unknown_function ? stack_depth - 1 : stack_depth;
- for (int i = 0; i < module_frames; ++i) {
+ HMODULE module_handles[], int stack_depth) {
+ for (int i = 0; i < stack_depth; ++i) {
HMODULE module_handle = NULL;
if (GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
reinterpret_cast<LPCTSTR>(addresses[i]),
@@ -128,6 +100,8 @@ std::string GetBuildIDForModule(HMODULE module_handle) {
return WideToUTF8(build_id);
}
+// ScopedDisablePriorityBoost -------------------------------------------------
+
// Disables priority boost on a thread for the lifetime of the object.
class ScopedDisablePriorityBoost {
public:
@@ -169,8 +143,9 @@ ScopedDisablePriorityBoost::~ScopedDisablePriorityBoost() {
// pointers and module handles as preallocated arrays rather than vectors, since
// vectors make it too easy to subtly allocate memory.
int SuspendThreadAndRecordStack(HANDLE thread_handle, int max_stack_size,
- const void* instruction_pointers[],
- bool* last_frame_is_unknown_function) {
+ const void* instruction_pointers[]) {
+ Win32StackFrameUnwinder frame_unwinder;
+
if (::SuspendThread(thread_handle) == -1)
return 0;
@@ -179,8 +154,7 @@ int SuspendThreadAndRecordStack(HANDLE thread_handle, int max_stack_size,
thread_context.ContextFlags = CONTEXT_FULL;
if (::GetThreadContext(thread_handle, &thread_context)) {
stack_depth = RecordStack(&thread_context, max_stack_size,
- instruction_pointers,
- last_frame_is_unknown_function);
+ instruction_pointers, &frame_unwinder);
}
// Disable the priority boost that the thread would otherwise receive on
@@ -200,6 +174,8 @@ int SuspendThreadAndRecordStack(HANDLE thread_handle, int max_stack_size,
return stack_depth;
}
+// NativeStackSamplerWin ------------------------------------------------------
+
class NativeStackSamplerWin : public NativeStackSampler {
public:
explicit NativeStackSamplerWin(win::ScopedHandle thread_handle);
@@ -264,12 +240,11 @@ void NativeStackSamplerWin::RecordStackSample(
const void* instruction_pointers[max_stack_size] = {0};
HMODULE module_handles[max_stack_size] = {0};
- bool last_frame_is_unknown_function = false;
- int stack_depth = SuspendThreadAndRecordStack(
- thread_handle_.Get(), max_stack_size, instruction_pointers,
- &last_frame_is_unknown_function);
+ int stack_depth = SuspendThreadAndRecordStack(thread_handle_.Get(),
+ max_stack_size,
+ instruction_pointers);
FindModuleHandlesForAddresses(instruction_pointers, module_handles,
- stack_depth, last_frame_is_unknown_function);
+ stack_depth);
CopyToSample(instruction_pointers, module_handles, stack_depth, sample,
current_modules_);
FreeModuleHandles(stack_depth, module_handles);
@@ -291,7 +266,7 @@ bool NativeStackSamplerWin::GetModuleForHandle(
module->filename = base::FilePath(module_name);
- module->base_address = reinterpret_cast<const void*>(module_handle);
+ module->base_address = reinterpret_cast<uintptr_t>(module_handle);
module->id = GetBuildIDForModule(module_handle);
if (module->id.empty())
@@ -330,7 +305,7 @@ void NativeStackSamplerWin::CopyToSample(
for (int i = 0; i < stack_depth; ++i) {
sample->push_back(StackSamplingProfiler::Frame(
- instruction_pointers[i],
+ reinterpret_cast<uintptr_t>(instruction_pointers[i]),
GetModuleIndex(module_handles[i], module)));
}
}
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index 9da662859fd..a197d844093 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -7,118 +7,84 @@
#include <algorithm>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/callback.h"
-#include "base/memory/singleton.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
#include "base/profiler/native_stack_sampler.h"
#include "base/synchronization/lock.h"
+#include "base/thread_task_runner_handle.h"
#include "base/timer/elapsed_timer.h"
namespace base {
-// DefaultProfileProcessor ----------------------------------------------------
-
namespace {
-// Singleton class responsible for providing the default processing for profiles
-// (i.e. for profiles generated by profilers without their own completed
-// callback).
-class DefaultProfileProcessor {
- public:
- using CompletedCallback = StackSamplingProfiler::CompletedCallback;
-
- ~DefaultProfileProcessor();
+// Used to ensure only one profiler is running at a time.
+LazyInstance<Lock> concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER;
- static DefaultProfileProcessor* GetInstance();
+// AsyncRunner ----------------------------------------------------------------
- // Sets the callback to use for processing profiles captured without a
- // per-profiler completed callback. Pending completed profiles are stored in
- // this object until a non-null callback is provided here. This function is
- // thread-safe.
- void SetCompletedCallback(CompletedCallback callback);
-
- // Processes |profiles|. This function is thread safe.
- void ProcessProfiles(
- const StackSamplingProfiler::CallStackProfiles& profiles);
+// Helper class to allow a profiler to be run completely asynchronously from the
+// initiator, without being concerned with the profiler's lifetime.
+class AsyncRunner {
+ public:
+ // Sets up a profiler and arranges for it to be deleted on its completed
+ // callback.
+ static void Run(PlatformThreadId thread_id,
+ const StackSamplingProfiler::SamplingParams& params,
+ const StackSamplingProfiler::CompletedCallback& callback);
private:
- friend struct DefaultSingletonTraits<DefaultProfileProcessor>;
-
- DefaultProfileProcessor();
-
- // Copies the pending profiles from |profiles_| into |profiles|, and clears
- // |profiles_|. This function may be called on any thread.
- void GetAndClearPendingProfiles(
- StackSamplingProfiler::CallStackProfiles* profiles);
+ AsyncRunner();
- // Gets the current completed callback, with proper locking.
- CompletedCallback GetCompletedCallback() const;
-
- mutable Lock callback_lock_;
- CompletedCallback default_completed_callback_;
+ // Runs the callback and deletes the AsyncRunner instance.
+ static void RunCallbackAndDeleteInstance(
+ scoped_ptr<AsyncRunner> object_to_be_deleted,
+ const StackSamplingProfiler::CompletedCallback& callback,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
+ const StackSamplingProfiler::CallStackProfiles& profiles);
- Lock profiles_lock_;
- StackSamplingProfiler::CallStackProfiles profiles_;
+ scoped_ptr<StackSamplingProfiler> profiler_;
- DISALLOW_COPY_AND_ASSIGN(DefaultProfileProcessor);
+ DISALLOW_COPY_AND_ASSIGN(AsyncRunner);
};
-DefaultProfileProcessor::~DefaultProfileProcessor() {}
-
// static
-DefaultProfileProcessor* DefaultProfileProcessor::GetInstance() {
- return Singleton<DefaultProfileProcessor>::get();
+void AsyncRunner::Run(
+ PlatformThreadId thread_id,
+ const StackSamplingProfiler::SamplingParams& params,
+ const StackSamplingProfiler::CompletedCallback &callback) {
+ scoped_ptr<AsyncRunner> runner(new AsyncRunner);
+ AsyncRunner* temp_ptr = runner.get();
+ temp_ptr->profiler_.reset(
+ new StackSamplingProfiler(thread_id, params,
+ Bind(&AsyncRunner::RunCallbackAndDeleteInstance,
+ Passed(&runner), callback,
+ ThreadTaskRunnerHandle::Get())));
+ // The callback won't be called until after Start(), so temp_ptr will still
+ // be valid here.
+ temp_ptr->profiler_->Start();
}
-void DefaultProfileProcessor::SetCompletedCallback(CompletedCallback callback) {
- {
- AutoLock scoped_lock(callback_lock_);
- default_completed_callback_ = callback;
- }
-
- if (!callback.is_null()) {
- // Provide any pending profiles to the callback immediately.
- StackSamplingProfiler::CallStackProfiles profiles;
- GetAndClearPendingProfiles(&profiles);
- if (!profiles.empty())
- callback.Run(profiles);
- }
-}
+AsyncRunner::AsyncRunner() {}
-void DefaultProfileProcessor::ProcessProfiles(
+void AsyncRunner::RunCallbackAndDeleteInstance(
+ scoped_ptr<AsyncRunner> object_to_be_deleted,
+ const StackSamplingProfiler::CompletedCallback& callback,
+ scoped_refptr<SingleThreadTaskRunner> task_runner,
const StackSamplingProfiler::CallStackProfiles& profiles) {
- CompletedCallback callback = GetCompletedCallback();
-
- // Store pending profiles if we don't have a valid callback.
- if (!callback.is_null()) {
- callback.Run(profiles);
- } else {
- AutoLock scoped_lock(profiles_lock_);
- profiles_.insert(profiles_.end(), profiles.begin(), profiles.end());
- }
-}
-
-DefaultProfileProcessor::DefaultProfileProcessor() {}
-
-void DefaultProfileProcessor::GetAndClearPendingProfiles(
- StackSamplingProfiler::CallStackProfiles* profiles) {
- profiles->clear();
-
- AutoLock scoped_lock(profiles_lock_);
- profiles_.swap(*profiles);
-}
-
-DefaultProfileProcessor::CompletedCallback
-DefaultProfileProcessor::GetCompletedCallback() const {
- AutoLock scoped_lock(callback_lock_);
- return default_completed_callback_;
+ callback.Run(profiles);
+ // Delete the instance on the original calling thread.
+ task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
}
} // namespace
// StackSamplingProfiler::Module ----------------------------------------------
-StackSamplingProfiler::Module::Module() : base_address(nullptr) {}
-StackSamplingProfiler::Module::Module(const void* base_address,
+StackSamplingProfiler::Module::Module() : base_address(0u) {}
+StackSamplingProfiler::Module::Module(uintptr_t base_address,
const std::string& id,
const FilePath& filename)
: base_address(base_address), id(id), filename(filename) {}
@@ -127,17 +93,17 @@ StackSamplingProfiler::Module::~Module() {}
// StackSamplingProfiler::Frame -----------------------------------------------
-StackSamplingProfiler::Frame::Frame(const void* instruction_pointer,
+StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
size_t module_index)
- : instruction_pointer(instruction_pointer),
- module_index(module_index) {}
+ : instruction_pointer(instruction_pointer), module_index(module_index) {}
StackSamplingProfiler::Frame::~Frame() {}
+StackSamplingProfiler::Frame::Frame() {}
+
// StackSamplingProfiler::CallStackProfile ------------------------------------
-StackSamplingProfiler::CallStackProfile::CallStackProfile()
- : preserve_sample_ordering(false), user_data(0) {}
+StackSamplingProfiler::CallStackProfile::CallStackProfile() {}
StackSamplingProfiler::CallStackProfile::~CallStackProfile() {}
@@ -146,7 +112,7 @@ StackSamplingProfiler::CallStackProfile::~CallStackProfile() {}
StackSamplingProfiler::SamplingThread::SamplingThread(
scoped_ptr<NativeStackSampler> native_sampler,
const SamplingParams& params,
- CompletedCallback completed_callback)
+ const CompletedCallback& completed_callback)
: native_sampler_(native_sampler.Pass()),
params_(params),
stop_event_(false, false),
@@ -158,8 +124,14 @@ StackSamplingProfiler::SamplingThread::~SamplingThread() {}
void StackSamplingProfiler::SamplingThread::ThreadMain() {
PlatformThread::SetName("Chrome_SamplingProfilerThread");
+ // For now, just ignore any requests to profile while another profiler is
+ // working.
+ if (!concurrent_profiling_lock.Get().Try())
+ return;
+
CallStackProfiles profiles;
CollectProfiles(&profiles);
+ concurrent_profiling_lock.Get().Release();
completed_callback_.Run(profiles);
}
@@ -170,14 +142,14 @@ void StackSamplingProfiler::SamplingThread::ThreadMain() {
// adhering to the sampling intervals. Once we have established users for the
// StackSamplingProfiler and the collected data to judge, we may go the other
// way or make this behavior configurable.
-bool StackSamplingProfiler::SamplingThread::CollectProfile(
+void StackSamplingProfiler::SamplingThread::CollectProfile(
CallStackProfile* profile,
- TimeDelta* elapsed_time) {
+ TimeDelta* elapsed_time,
+ bool* was_stopped) {
ElapsedTimer profile_timer;
- CallStackProfile current_profile;
- native_sampler_->ProfileRecordingStarting(&current_profile.modules);
- current_profile.sampling_period = params_.sampling_interval;
- bool burst_completed = true;
+ native_sampler_->ProfileRecordingStarting(&profile->modules);
+ profile->sampling_period = params_.sampling_interval;
+ *was_stopped = false;
TimeDelta previous_elapsed_sample_time;
for (int i = 0; i < params_.samples_per_burst; ++i) {
if (i != 0) {
@@ -186,26 +158,19 @@ bool StackSamplingProfiler::SamplingThread::CollectProfile(
if (stop_event_.TimedWait(
std::max(params_.sampling_interval - previous_elapsed_sample_time,
TimeDelta()))) {
- burst_completed = false;
+ *was_stopped = true;
break;
}
}
ElapsedTimer sample_timer;
- current_profile.samples.push_back(Sample());
- native_sampler_->RecordStackSample(&current_profile.samples.back());
+ profile->samples.push_back(Sample());
+ native_sampler_->RecordStackSample(&profile->samples.back());
previous_elapsed_sample_time = sample_timer.Elapsed();
}
*elapsed_time = profile_timer.Elapsed();
- current_profile.profile_duration = *elapsed_time;
- current_profile.preserve_sample_ordering = params_.preserve_sample_ordering;
- current_profile.user_data = params_.user_data;
+ profile->profile_duration = *elapsed_time;
native_sampler_->ProfileRecordingStopped();
-
- if (burst_completed)
- *profile = current_profile;
-
- return burst_completed;
}
// In an analogous manner to CollectProfile() and samples exceeding the expected
@@ -228,9 +193,13 @@ void StackSamplingProfiler::SamplingThread::CollectProfiles(
}
CallStackProfile profile;
- if (!CollectProfile(&profile, &previous_elapsed_profile_time))
+ bool was_stopped = false;
+ CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped);
+ if (!profile.samples.empty())
+ profiles->push_back(profile);
+
+ if (was_stopped)
return;
- profiles->push_back(profile);
}
}
@@ -245,18 +214,12 @@ StackSamplingProfiler::SamplingParams::SamplingParams()
bursts(1),
burst_interval(TimeDelta::FromMilliseconds(10000)),
samples_per_burst(300),
- sampling_interval(TimeDelta::FromMilliseconds(100)),
- preserve_sample_ordering(false),
- user_data(0) {
+ sampling_interval(TimeDelta::FromMilliseconds(100)) {
}
StackSamplingProfiler::StackSamplingProfiler(PlatformThreadId thread_id,
- const SamplingParams& params)
- : thread_id_(thread_id), params_(params) {}
-
-StackSamplingProfiler::StackSamplingProfiler(PlatformThreadId thread_id,
const SamplingParams& params,
- CompletedCallback callback)
+ const CompletedCallback& callback)
: thread_id_(thread_id), params_(params), completed_callback_(callback) {}
StackSamplingProfiler::~StackSamplingProfiler() {
@@ -265,18 +228,26 @@ StackSamplingProfiler::~StackSamplingProfiler() {
PlatformThread::Join(sampling_thread_handle_);
}
+// static
+void StackSamplingProfiler::StartAndRunAsync(
+ PlatformThreadId thread_id,
+ const SamplingParams& params,
+ const CompletedCallback& callback) {
+ CHECK(ThreadTaskRunnerHandle::Get());
+ AsyncRunner::Run(thread_id, params, callback);
+}
+
void StackSamplingProfiler::Start() {
+ if (completed_callback_.is_null())
+ return;
+
scoped_ptr<NativeStackSampler> native_sampler =
NativeStackSampler::Create(thread_id_);
if (!native_sampler)
return;
- CompletedCallback callback =
- !completed_callback_.is_null() ? completed_callback_ :
- Bind(&DefaultProfileProcessor::ProcessProfiles,
- Unretained(DefaultProfileProcessor::GetInstance()));
sampling_thread_.reset(
- new SamplingThread(native_sampler.Pass(), params_, callback));
+ new SamplingThread(native_sampler.Pass(), params_, completed_callback_));
if (!PlatformThread::Create(0, sampling_thread_.get(),
&sampling_thread_handle_))
sampling_thread_.reset();
@@ -287,12 +258,6 @@ void StackSamplingProfiler::Stop() {
sampling_thread_->Stop();
}
-// static
-void StackSamplingProfiler::SetDefaultCompletedCallback(
- CompletedCallback callback) {
- DefaultProfileProcessor::GetInstance()->SetCompletedCallback(callback);
-}
-
// StackSamplingProfiler::Frame global functions ------------------------------
bool operator==(const StackSamplingProfiler::Frame &a,
diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h
index 9d52f27a7ef..9aa9c31d67a 100644
--- a/chromium/base/profiler/stack_sampling_profiler.h
+++ b/chromium/base/profiler/stack_sampling_profiler.h
@@ -49,14 +49,9 @@ class NativeStackSampler;
// a 10Hz interval for a total of 30 seconds. All of these parameters may be
// altered as desired.
//
-// When all call stack profiles are complete or the profiler is stopped, if the
-// custom completed callback was set it is called from a thread created by the
-// profiler with the completed profiles. A profile is considered complete if all
-// requested samples were recorded for the profile (i.e. it was not stopped
-// prematurely). If no callback was set, the default completed callback will be
-// called with the profiles. It is expected that the the default completed
-// callback is set by the metrics system to allow profiles to be provided via
-// UMA.
+// When all call stack profiles are complete, or the profiler is stopped, the
+// completed callback is called from a thread created by the profiler with the
+// collected profiles.
//
// The results of the profiling are passed to the completed callback and consist
// of a vector of CallStackProfiles. Each CallStackProfile corresponds to a
@@ -68,12 +63,13 @@ class BASE_EXPORT StackSamplingProfiler {
// Module represents the module (DLL or exe) corresponding to a stack frame.
struct BASE_EXPORT Module {
Module();
- Module(const void* base_address, const std::string& id,
+ Module(uintptr_t base_address,
+ const std::string& id,
const FilePath& filename);
~Module();
// Points to the base address of the module.
- const void* base_address;
+ uintptr_t base_address;
// An opaque binary string that uniquely identifies a particular program
// version with high probability. This is parsed from headers of the loaded
@@ -93,11 +89,14 @@ class BASE_EXPORT StackSamplingProfiler {
// Identifies an unknown module.
static const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
- Frame(const void* instruction_pointer, size_t module_index);
+ Frame(uintptr_t instruction_pointer, size_t module_index);
~Frame();
+ // Default constructor to satisfy IPC macros. Do not use explicitly.
+ Frame();
+
// The sampled instruction pointer within the function.
- const void* instruction_pointer;
+ uintptr_t instruction_pointer;
// Index of the module in CallStackProfile::modules. We don't represent
// module state directly here to save space.
@@ -120,13 +119,6 @@ class BASE_EXPORT StackSamplingProfiler {
// Time between samples.
TimeDelta sampling_period;
-
- // True if sample ordering is important and should be preserved if and when
- // this profile is compressed and processed.
- bool preserve_sample_ordering;
-
- // User data associated with this profile.
- uintptr_t user_data;
};
using CallStackProfiles = std::vector<CallStackProfile>;
@@ -152,13 +144,6 @@ class BASE_EXPORT StackSamplingProfiler {
// duration from the start of one sample to the start of the next
// sample. Defaults to 100ms.
TimeDelta sampling_interval;
-
- // True if sample ordering is important and should be preserved if and when
- // this profile is compressed and processed. Defaults to false.
- bool preserve_sample_ordering;
-
- // User data associated with this profile.
- uintptr_t user_data;
};
// The callback type used to collect completed profiles.
@@ -171,33 +156,29 @@ class BASE_EXPORT StackSamplingProfiler {
// thread-safe callback implementation.
using CompletedCallback = Callback<void(const CallStackProfiles&)>;
- // Creates a profiler that sends completed profiles to the default completed
- // callback.
- StackSamplingProfiler(PlatformThreadId thread_id,
- const SamplingParams& params);
- // Creates a profiler that sends completed profiles to |completed_callback|.
+ // Creates a profiler that sends completed profiles to |callback|.
StackSamplingProfiler(PlatformThreadId thread_id,
const SamplingParams& params,
- CompletedCallback callback);
+ const CompletedCallback& callback);
+ // Stops any profiling currently taking place before destroying the profiler.
~StackSamplingProfiler();
+ // The fire-and-forget interface: starts a profiler and allows it to complete
+ // without the caller needing to manage the profiler lifetime. May be invoked
+ // from any thread, but requires that the calling thread has a message loop.
+ static void StartAndRunAsync(PlatformThreadId thread_id,
+ const SamplingParams& params,
+ const CompletedCallback& callback);
+
// Initializes the profiler and starts sampling.
void Start();
// Stops the profiler and any ongoing sampling. Calling this function is
// optional; if not invoked profiling terminates when all the profiling bursts
- // specified in the SamplingParams are completed.
+ // specified in the SamplingParams are completed or the profiler is destroyed,
+ // whichever occurs first.
void Stop();
- // Sets a callback to process profiles collected by profiler instances without
- // a completed callback. Profiles are queued internally until a non-null
- // callback is provided to this function,
- //
- // The callback is typically called on a thread created by the profiler. If
- // completed profiles are queued when set, however, it will also be called
- // immediately on the calling thread.
- static void SetDefaultCompletedCallback(CompletedCallback callback);
-
private:
// SamplingThread is a separate thread used to suspend and sample stacks from
// the target thread.
@@ -208,7 +189,7 @@ class BASE_EXPORT StackSamplingProfiler {
// |completed_callback| must be callable on any thread.
SamplingThread(scoped_ptr<NativeStackSampler> native_sampler,
const SamplingParams& params,
- CompletedCallback completed_callback);
+ const CompletedCallback& completed_callback);
~SamplingThread() override;
// PlatformThread::Delegate:
@@ -217,14 +198,15 @@ class BASE_EXPORT StackSamplingProfiler {
void Stop();
private:
- // Collects a call stack profile from a single burst. Returns true if the
- // profile was collected, or false if collection was stopped before it
- // completed.
- bool CollectProfile(CallStackProfile* profile, TimeDelta* elapsed_time);
+ // Collects |profile| from a single burst. If the profiler was stopped
+ // during collection, sets |was_stopped| and provides the set of samples
+ // collected up to that point.
+ void CollectProfile(CallStackProfile* profile, TimeDelta* elapsed_time,
+ bool* was_stopped);
// Collects call stack profiles from all bursts, or until the sampling is
- // stopped. If stopped before complete, |call_stack_profiles| will contain
- // only full bursts.
+ // stopped. If stopped before complete, the last profile in
+ // |call_stack_profiles| may contain a partial burst.
void CollectProfiles(CallStackProfiles* profiles);
scoped_ptr<NativeStackSampler> native_sampler_;
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 5ade15a6edc..3fceed49ade 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -4,14 +4,23 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
+#include "base/memory/scoped_vector.h"
+#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/profiler/stack_sampling_profiler.h"
+#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
+// STACK_SAMPLING_PROFILER_SUPPORTED is used to conditionally enable the tests
+// below for supported platforms (currently Win x64).
+#if defined(_WIN64)
+#define STACK_SAMPLING_PROFILER_SUPPORTED 1
+#endif
+
namespace base {
using SamplingParams = StackSamplingProfiler::SamplingParams;
@@ -124,9 +133,8 @@ void WithTargetThread(Function function) {
// Captures profiles as specified by |params| on the TargetThread, and returns
// them in |profiles|. Waits up to |profiler_wait_time| for the profiler to
// complete.
-void CaptureProfilesWithObjectCallback(const SamplingParams& params,
- CallStackProfiles* profiles,
- TimeDelta profiler_wait_time) {
+void CaptureProfiles(const SamplingParams& params, TimeDelta profiler_wait_time,
+ CallStackProfiles* profiles) {
profiles->clear();
WithTargetThread([&params, profiles, profiler_wait_time](
@@ -143,43 +151,6 @@ void CaptureProfilesWithObjectCallback(const SamplingParams& params,
});
}
-// Captures profiles as specified by |params| on the TargetThread, and returns
-// them in |profiles|. Uses the default callback rather than a per-object
-// callback.
-void CaptureProfilesWithDefaultCallback(const SamplingParams& params,
- CallStackProfiles* profiles) {
- profiles->clear();
-
- WithTargetThread([&params, profiles](PlatformThreadId target_thread_id) {
- WaitableEvent sampling_thread_completed(false, false);
- StackSamplingProfiler::SetDefaultCompletedCallback(
- Bind(&SaveProfilesAndSignalEvent, Unretained(profiles),
- Unretained(&sampling_thread_completed)));
-
- StackSamplingProfiler profiler(target_thread_id, params);
- profiler.Start();
- sampling_thread_completed.Wait();
-
- StackSamplingProfiler::SetDefaultCompletedCallback(
- StackSamplingProfiler::CompletedCallback());
- });
-}
-
-// Runs the profiler with |params| on the TargetThread, with no default or
-// per-object callback.
-void RunProfilerWithNoCallback(const SamplingParams& params,
- TimeDelta profiler_wait_time) {
- WithTargetThread([&params, profiler_wait_time](
- PlatformThreadId target_thread_id) {
- StackSamplingProfiler profiler(target_thread_id, params);
- profiler.Start();
- // Since we don't specify a callback, we don't have a synchronization
- // mechanism with the sampling thread. Just sleep instead.
- PlatformThread::Sleep(profiler_wait_time);
- profiler.Stop();
- });
-}
-
// If this executable was linked with /INCREMENTAL (the default for non-official
// debug and release builds on Windows), function addresses do not correspond to
// function code itself, but instead to instructions in the Incremental Link
@@ -212,8 +183,9 @@ Sample::const_iterator FindFirstFrameWithinFunction(
int function_size) {
function_address = MaybeFixupFunctionAddressForILT(function_address);
for (auto it = sample.begin(); it != sample.end(); ++it) {
- if ((it->instruction_pointer >= function_address) &&
- (it->instruction_pointer <
+ if ((reinterpret_cast<const void*>(it->instruction_pointer) >=
+ function_address) &&
+ (reinterpret_cast<const void*>(it->instruction_pointer) <
(static_cast<const unsigned char*>(function_address) + function_size)))
return it;
}
@@ -227,7 +199,7 @@ std::string FormatSampleForDiagnosticOutput(
std::string output;
for (const Frame& frame: sample) {
output += StringPrintf(
- "0x%p %s\n", frame.instruction_pointer,
+ "0x%p %s\n", reinterpret_cast<const void*>(frame.instruction_pointer),
modules[frame.module_index].filename.AsUTF8Unsafe().c_str());
}
return output;
@@ -239,13 +211,9 @@ TimeDelta AVeryLongTimeDelta() { return TimeDelta::FromDays(1); }
} // namespace
-
-// The tests below are enabled for Win x64 only, pending implementation of the
-// tested functionality on other platforms/architectures.
-
// Checks that the basic expected information is present in a sampled call stack
// profile.
-#if defined(_WIN64)
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
#define MAYBE_Basic Basic
#else
#define MAYBE_Basic DISABLED_Basic
@@ -254,11 +222,9 @@ TEST(StackSamplingProfilerTest, MAYBE_Basic) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
params.samples_per_burst = 1;
- params.user_data = 100;
- params.preserve_sample_ordering = true;
std::vector<CallStackProfile> profiles;
- CaptureProfilesWithObjectCallback(params, &profiles, AVeryLongTimeDelta());
+ CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
// Check that the profile and samples sizes are correct, and the module
// indices are in range.
@@ -271,8 +237,6 @@ TEST(StackSamplingProfilerTest, MAYBE_Basic) {
ASSERT_GE(frame.module_index, 0u);
ASSERT_LT(frame.module_index, profile.modules.size());
}
- EXPECT_EQ(100u, profile.user_data);
- EXPECT_EQ(true, profile.preserve_sample_ordering);
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled() and that the frame has this
@@ -296,9 +260,36 @@ TEST(StackSamplingProfilerTest, MAYBE_Basic) {
EXPECT_EQ(executable_path, profile.modules[loc->module_index].filename);
}
+// Checks that the fire-and-forget interface works.
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
+#define MAYBE_StartAndRunAsync StartAndRunAsync
+#else
+#define MAYBE_StartAndRunAsync DISABLED_StartAndRunAsync
+#endif
+TEST(StackSamplingProfilerTest, MAYBE_StartAndRunAsync) {
+ // StartAndRunAsync requires the caller to have a message loop.
+ MessageLoop message_loop;
+
+ SamplingParams params;
+ params.samples_per_burst = 1;
+
+ CallStackProfiles profiles;
+ WithTargetThread([&params, &profiles](PlatformThreadId target_thread_id) {
+ WaitableEvent sampling_thread_completed(false, false);
+ const StackSamplingProfiler::CompletedCallback callback =
+ Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
+ Unretained(&sampling_thread_completed));
+ StackSamplingProfiler::StartAndRunAsync(target_thread_id, params, callback);
+ RunLoop().RunUntilIdle();
+ sampling_thread_completed.Wait();
+ });
+
+ ASSERT_EQ(1u, profiles.size());
+}
+
// Checks that the expected number of profiles and samples are present in the
// call stack profiles produced.
-#if defined(_WIN64)
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
#define MAYBE_MultipleProfilesAndSamples MultipleProfilesAndSamples
#else
#define MAYBE_MultipleProfilesAndSamples DISABLED_MultipleProfilesAndSamples
@@ -311,7 +302,7 @@ TEST(StackSamplingProfilerTest, MAYBE_MultipleProfilesAndSamples) {
params.samples_per_burst = 3;
std::vector<CallStackProfile> profiles;
- CaptureProfilesWithObjectCallback(params, &profiles, AVeryLongTimeDelta());
+ CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
ASSERT_EQ(2u, profiles.size());
EXPECT_EQ(3u, profiles[0].samples.size());
@@ -320,7 +311,7 @@ TEST(StackSamplingProfilerTest, MAYBE_MultipleProfilesAndSamples) {
// Checks that no call stack profiles are captured if the profiling is stopped
// during the initial delay.
-#if defined(_WIN64)
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
#define MAYBE_StopDuringInitialDelay StopDuringInitialDelay
#else
#define MAYBE_StopDuringInitialDelay DISABLED_StopDuringInitialDelay
@@ -330,15 +321,14 @@ TEST(StackSamplingProfilerTest, MAYBE_StopDuringInitialDelay) {
params.initial_delay = TimeDelta::FromSeconds(60);
std::vector<CallStackProfile> profiles;
- CaptureProfilesWithObjectCallback(params, &profiles,
- TimeDelta::FromMilliseconds(0));
+ CaptureProfiles(params, TimeDelta::FromMilliseconds(0), &profiles);
EXPECT_TRUE(profiles.empty());
}
// Checks that the single completed call stack profile is captured if the
// profiling is stopped between bursts.
-#if defined(_WIN64)
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
#define MAYBE_StopDuringInterBurstInterval StopDuringInterBurstInterval
#else
#define MAYBE_StopDuringInterBurstInterval DISABLED_StopDuringInterBurstInterval
@@ -351,15 +341,14 @@ TEST(StackSamplingProfilerTest, MAYBE_StopDuringInterBurstInterval) {
params.samples_per_burst = 1;
std::vector<CallStackProfile> profiles;
- CaptureProfilesWithObjectCallback(params, &profiles,
- TimeDelta::FromMilliseconds(50));
+ CaptureProfiles(params, TimeDelta::FromMilliseconds(50), &profiles);
ASSERT_EQ(1u, profiles.size());
EXPECT_EQ(1u, profiles[0].samples.size());
}
-// Checks that only completed call stack profiles are captured.
-#if defined(_WIN64)
+// Checks that incomplete call stack profiles are captured.
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
#define MAYBE_StopDuringInterSampleInterval StopDuringInterSampleInterval
#else
#define MAYBE_StopDuringInterSampleInterval \
@@ -371,54 +360,14 @@ TEST(StackSamplingProfilerTest, MAYBE_StopDuringInterSampleInterval) {
params.samples_per_burst = 2;
std::vector<CallStackProfile> profiles;
- CaptureProfilesWithObjectCallback(params, &profiles,
- TimeDelta::FromMilliseconds(50));
-
- EXPECT_TRUE(profiles.empty());
-}
-
-// Checks that profiles are captured via the default completed callback.
-#if defined(_WIN64)
-#define MAYBE_DefaultCallback DefaultCallback
-#else
-#define MAYBE_DefaultCallback DISABLED_DefaultCallback
-#endif
-TEST(StackSamplingProfilerTest, MAYBE_DefaultCallback) {
- SamplingParams params;
- params.samples_per_burst = 1;
-
- CallStackProfiles profiles;
- CaptureProfilesWithDefaultCallback(params, &profiles);
-
- EXPECT_EQ(1u, profiles.size());
- EXPECT_EQ(1u, profiles[0].samples.size());
-}
-
-// Checks that profiles are queued until a default callback is set, then
-// delivered.
-#if defined(_WIN64)
-#define MAYBE_ProfilesQueuedWithNoCallback ProfilesQueuedWithNoCallback
-#else
-#define MAYBE_ProfilesQueuedWithNoCallback DISABLED_ProfilesQueuedWithNoCallback
-#endif
-TEST(StackSamplingProfilerTest, MAYBE_ProfilesQueuedWithNoCallback) {
- SamplingParams params;
- params.samples_per_burst = 1;
+ CaptureProfiles(params, TimeDelta::FromMilliseconds(50), &profiles);
- RunProfilerWithNoCallback(params, TimeDelta::FromMilliseconds(50));
-
- CallStackProfiles profiles;
- // This should immediately call SaveProfiles on this thread.
- StackSamplingProfiler::SetDefaultCompletedCallback(
- Bind(&SaveProfiles, Unretained(&profiles)));
- EXPECT_EQ(1u, profiles.size());
+ ASSERT_EQ(1u, profiles.size());
EXPECT_EQ(1u, profiles[0].samples.size());
- StackSamplingProfiler::SetDefaultCompletedCallback(
- StackSamplingProfiler::CompletedCallback());
}
// Checks that we can destroy the profiler while profiling.
-#if defined(_WIN64)
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
#define MAYBE_DestroyProfilerWhileProfiling DestroyProfilerWhileProfiling
#else
#define MAYBE_DestroyProfilerWhileProfiling \
@@ -442,4 +391,71 @@ TEST(StackSamplingProfilerTest, MAYBE_DestroyProfilerWhileProfiling) {
});
}
+// Checks that the same profiler may be run multiple times.
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
+#define MAYBE_CanRunMultipleTimes CanRunMultipleTimes
+#else
+#define MAYBE_CanRunMultipleTimes DISABLED_CanRunMultipleTimes
+#endif
+TEST(StackSamplingProfilerTest, MAYBE_CanRunMultipleTimes) {
+ SamplingParams params;
+ params.sampling_interval = TimeDelta::FromMilliseconds(0);
+ params.samples_per_burst = 1;
+
+ std::vector<CallStackProfile> profiles;
+ CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+ ASSERT_EQ(1u, profiles.size());
+
+ profiles.clear();
+ CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
+ ASSERT_EQ(1u, profiles.size());
+}
+
+// Checks that requests to start profiling while another profile is taking place
+// are ignored.
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
+#define MAYBE_ConcurrentProfiling ConcurrentProfiling
+#else
+#define MAYBE_ConcurrentProfiling DISABLED_ConcurrentProfiling
+#endif
+TEST(StackSamplingProfilerTest, MAYBE_ConcurrentProfiling) {
+ WithTargetThread([](PlatformThreadId target_thread_id) {
+ SamplingParams params[2];
+ params[0].initial_delay = TimeDelta::FromMilliseconds(10);
+ params[0].sampling_interval = TimeDelta::FromMilliseconds(0);
+ params[0].samples_per_burst = 1;
+
+ params[1].sampling_interval = TimeDelta::FromMilliseconds(0);
+ params[1].samples_per_burst = 1;
+
+ CallStackProfiles profiles[2];
+ ScopedVector<WaitableEvent> sampling_completed;
+ ScopedVector<StackSamplingProfiler> profiler;
+ for (int i = 0; i < 2; ++i) {
+ sampling_completed.push_back(new WaitableEvent(false, false));
+ const StackSamplingProfiler::CompletedCallback callback =
+ Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles[i]),
+ Unretained(sampling_completed[i]));
+ profiler.push_back(
+ new StackSamplingProfiler(target_thread_id, params[i], callback));
+ }
+
+ profiler[0]->Start();
+ profiler[1]->Start();
+
+ // Wait for the first profiler to finish.
+ sampling_completed[0]->Wait();
+ EXPECT_EQ(1u, profiles[0].size());
+
+ // Give the second profiler a chance to run and observe that it hasn't.
+ EXPECT_FALSE(
+ sampling_completed[1]->TimedWait(TimeDelta::FromMilliseconds(25)));
+
+ // Start the second profiler again and it should run.
+ profiler[1]->Start();
+ sampling_completed[1]->Wait();
+ EXPECT_EQ(1u, profiles[1].size());
+ });
+}
+
} // namespace base
diff --git a/chromium/base/profiler/win32_stack_frame_unwinder.cc b/chromium/base/profiler/win32_stack_frame_unwinder.cc
new file mode 100644
index 00000000000..4ab3a3e1048
--- /dev/null
+++ b/chromium/base/profiler/win32_stack_frame_unwinder.cc
@@ -0,0 +1,212 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/win32_stack_frame_unwinder.h"
+
+#include "base/containers/hash_tables.h"
+#include "base/memory/singleton.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+// LeafUnwindBlacklist --------------------------------------------------------
+
+namespace {
+
+// Records modules that are known to have functions that violate the Microsoft
+// x64 calling convention and would be dangerous to manually unwind if
+// encountered as the last frame on the call stack. Functions like these have
+// been observed in injected third party modules that either do not provide
+// function unwind information, or do not provide the required function prologue
+// and epilogue. The former case was observed in several AV products and the
+// latter in a WndProc function associated with Actual Window
+// Manager/aimemb64.dll. See https://crbug.com/476422.
+class LeafUnwindBlacklist {
+ public:
+ static LeafUnwindBlacklist* GetInstance();
+
+ // This function does not allocate memory and is safe to call between
+ // SuspendThread and ResumeThread.
+ bool IsBlacklisted(const void* module) const;
+
+ // Allocates memory. Must be invoked only after ResumeThread, otherwise we
+ // risk deadlocking on a heap lock held by a suspended thread.
+ void AddModuleToBlacklist(const void* module);
+
+ private:
+ friend struct DefaultSingletonTraits<LeafUnwindBlacklist>;
+
+ LeafUnwindBlacklist();
+ ~LeafUnwindBlacklist();
+
+ // The set of modules known to have functions that violate the Microsoft x64
+ // calling convention.
+ base::hash_set<const void*> blacklisted_modules_;
+
+ DISALLOW_COPY_AND_ASSIGN(LeafUnwindBlacklist);
+};
+
+// static
+LeafUnwindBlacklist* LeafUnwindBlacklist::GetInstance() {
+ // Leaky for shutdown performance.
+ return Singleton<LeafUnwindBlacklist,
+ LeakySingletonTraits<LeafUnwindBlacklist>>::get();
+}
+
+bool LeafUnwindBlacklist::IsBlacklisted(const void* module) const {
+ return ContainsKey(blacklisted_modules_, module);
+}
+
+void LeafUnwindBlacklist::AddModuleToBlacklist(const void* module) {
+ CHECK(module);
+ blacklisted_modules_.insert(module);
+}
+
+LeafUnwindBlacklist::LeafUnwindBlacklist() {}
+LeafUnwindBlacklist::~LeafUnwindBlacklist() {}
+
+} // namespace
+
+// Win32StackFrameUnwinder ----------------------------------------------------
+
+Win32StackFrameUnwinder::UnwindFunctions::~UnwindFunctions() {}
+Win32StackFrameUnwinder::UnwindFunctions::UnwindFunctions() {}
+
+Win32StackFrameUnwinder::Win32UnwindFunctions::Win32UnwindFunctions() {}
+
+PRUNTIME_FUNCTION Win32StackFrameUnwinder::Win32UnwindFunctions::
+LookupFunctionEntry(DWORD64 program_counter, PDWORD64 image_base) {
+#ifdef _WIN64
+ return RtlLookupFunctionEntry(program_counter, image_base, nullptr);
+#else
+ NOTREACHED();
+ return nullptr;
+#endif
+}
+
+void Win32StackFrameUnwinder::Win32UnwindFunctions::VirtualUnwind(
+ DWORD64 image_base,
+ DWORD64 program_counter,
+ PRUNTIME_FUNCTION runtime_function,
+ CONTEXT* context) {
+#ifdef _WIN64
+ void* handler_data;
+ ULONG64 establisher_frame;
+ KNONVOLATILE_CONTEXT_POINTERS nvcontext = {};
+ RtlVirtualUnwind(0, image_base, program_counter, runtime_function,
+ context, &handler_data, &establisher_frame, &nvcontext);
+#else
+ NOTREACHED();
+#endif
+}
+
+
+Win32StackFrameUnwinder::Win32StackFrameUnwinder()
+ : Win32StackFrameUnwinder(&win32_unwind_functions_) {
+}
+
+Win32StackFrameUnwinder::~Win32StackFrameUnwinder() {
+ if (pending_blacklisted_module_) {
+ LeafUnwindBlacklist::GetInstance()->AddModuleToBlacklist(
+ pending_blacklisted_module_);
+ }
+}
+
+bool Win32StackFrameUnwinder::TryUnwind(CONTEXT* context) {
+#ifdef _WIN64
+ CHECK(!at_top_frame_ || unwind_info_present_for_all_frames_);
+ CHECK(!pending_blacklisted_module_);
+
+ ULONG64 image_base;
+ // Try to look up unwind metadata for the current function.
+ PRUNTIME_FUNCTION runtime_function =
+ unwind_functions_->LookupFunctionEntry(context->Rip, &image_base);
+
+ if (runtime_function) {
+ unwind_functions_->VirtualUnwind(image_base, context->Rip, runtime_function,
+ context);
+ at_top_frame_ = false;
+ } else {
+ // RtlLookupFunctionEntry didn't find unwind information. This could mean
+ // the code at the instruction pointer is in:
+ //
+ // 1. a true leaf function (i.e. a function that neither calls a function,
+ // nor allocates any stack space itself) in which case the return
+ // address is at RSP, or
+ //
+ // 2. a function that doesn't adhere to the Microsoft x64 calling
+ // convention, either by not providing the required unwind information,
+ // or by not having the prologue or epilogue required for unwinding;
+ // this case has been observed in crash data in injected third party
+ // DLLs.
+ //
+ // In valid code, case 1 can only occur (by definition) as the last frame
+ // on the stack. This happens in about 5% of observed stacks and can
+ // easily be unwound by popping RSP and using it as the next frame's
+ // instruction pointer.
+ //
+ // Case 2 can occur anywhere on the stack, and attempting to unwind the
+ // stack will result in treating whatever value happens to be on the stack
+ // at RSP as the next frame's instruction pointer. This is certainly wrong
+    // and very likely to lead to crashing by dereferencing invalid pointers in
+ // the next RtlVirtualUnwind call.
+ //
+ // If we see case 2 at a location not the last frame, and all the previous
+    // frames had valid unwind information, then this is definitely bad code.
+    // We blacklist the module as untrustworthy for unwinding if we encounter a
+ // function in it that doesn't have unwind information.
+
+ if (at_top_frame_) {
+ at_top_frame_ = false;
+
+ // We are at the end of the stack. It's very likely that we're in case 1
+ // since the vast majority of code adheres to the Microsoft x64 calling
+ // convention. But there's a small chance we might be unlucky and be in
+ // case 2. If this module is known to have bad code according to the
+ // leaf unwind blacklist, stop here, otherwise manually unwind.
+ if (LeafUnwindBlacklist::GetInstance()->IsBlacklisted(
+ reinterpret_cast<const void*>(image_base))) {
+ return false;
+ }
+
+ context->Rip = context->Rsp;
+ context->Rsp += 8;
+ unwind_info_present_for_all_frames_ = false;
+ } else {
+ // We're not at the end of the stack. This frame is untrustworthy and we
+ // can't safely unwind from here.
+ if (unwind_info_present_for_all_frames_) {
+ // Unwind information was present for all previous frames, so we can
+ // be confident this is case 2. Record the module to be blacklisted.
+ pending_blacklisted_module_ =
+ reinterpret_cast<const void *>(image_base);
+ } else {
+ // We started off on a function without unwind information. It's very
+ // likely that all frames up to this point have been good, and this
+ // frame is case 2. But it's possible that the initial frame was case
+ // 2 but hadn't been blacklisted yet, and we've started to go off into
+ // the weeds. Since we can't be sure, just bail out without
+ // blacklisting the module; chances are we'll later encounter the same
+ // function on a stack with full unwind information.
+ }
+ return false;
+ }
+ }
+
+ return true;
+#else
+ NOTREACHED();
+ return false;
+#endif
+}
+
+Win32StackFrameUnwinder::Win32StackFrameUnwinder(
+ UnwindFunctions* unwind_functions)
+ : at_top_frame_(true),
+ unwind_info_present_for_all_frames_(true),
+ pending_blacklisted_module_(nullptr),
+ unwind_functions_(unwind_functions) {
+}
+
+} // namespace base
diff --git a/chromium/base/profiler/win32_stack_frame_unwinder.h b/chromium/base/profiler/win32_stack_frame_unwinder.h
new file mode 100644
index 00000000000..a45d5778097
--- /dev/null
+++ b/chromium/base/profiler/win32_stack_frame_unwinder.h
@@ -0,0 +1,83 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROFILER_WIN32_STACK_FRAME_UNWINDER_H_
+#define BASE_PROFILER_WIN32_STACK_FRAME_UNWINDER_H_
+
+#include <windows.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+
+#if !defined(_WIN64)
+// Allows code to compile for x86. Actual support for x86 will require either
+// refactoring these interfaces or separate architecture-specific interfaces.
+using PRUNTIME_FUNCTION = void*;
+#endif // !defined(_WIN64)
+
+// Instances of this class are expected to be created and destroyed for each
+// stack unwinding, outside of SuspendThread/ResumeThread.
+class BASE_EXPORT Win32StackFrameUnwinder {
+ public:
+ // Interface for Win32 unwind-related functionality this class depends
+ // on. Provides a seam for testing.
+ class BASE_EXPORT UnwindFunctions {
+ public:
+ virtual ~UnwindFunctions();
+
+ virtual PRUNTIME_FUNCTION LookupFunctionEntry(DWORD64 program_counter,
+ PDWORD64 image_base) = 0;
+ virtual void VirtualUnwind(DWORD64 image_base,
+ DWORD64 program_counter,
+ PRUNTIME_FUNCTION runtime_function,
+ CONTEXT* context) = 0;
+ protected:
+ UnwindFunctions();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(UnwindFunctions);
+ };
+
+ class BASE_EXPORT Win32UnwindFunctions : public UnwindFunctions {
+ public:
+ Win32UnwindFunctions();
+
+ PRUNTIME_FUNCTION LookupFunctionEntry(DWORD64 program_counter,
+ PDWORD64 image_base) override;
+
+ void VirtualUnwind(DWORD64 image_base,
+ DWORD64 program_counter,
+ PRUNTIME_FUNCTION runtime_function,
+ CONTEXT* context) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Win32UnwindFunctions);
+ };
+
+ Win32StackFrameUnwinder();
+ ~Win32StackFrameUnwinder();
+
+ bool TryUnwind(CONTEXT* context);
+
+ private:
+ // This function is for test purposes only.
+ Win32StackFrameUnwinder(UnwindFunctions* unwind_functions);
+ friend class Win32StackFrameUnwinderTest;
+
+ // State associated with each stack unwinding.
+ bool at_top_frame_;
+ bool unwind_info_present_for_all_frames_;
+ const void* pending_blacklisted_module_;
+
+ Win32UnwindFunctions win32_unwind_functions_;
+ UnwindFunctions* const unwind_functions_;
+
+ DISALLOW_COPY_AND_ASSIGN(Win32StackFrameUnwinder);
+};
+
+} // namespace base
+
+#endif // BASE_PROFILER_WIN32_STACK_FRAME_UNWINDER_H_
diff --git a/chromium/base/profiler/win32_stack_frame_unwinder_unittest.cc b/chromium/base/profiler/win32_stack_frame_unwinder_unittest.cc
new file mode 100644
index 00000000000..a27379322f2
--- /dev/null
+++ b/chromium/base/profiler/win32_stack_frame_unwinder_unittest.cc
@@ -0,0 +1,231 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/profiler/win32_stack_frame_unwinder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class TestUnwindFunctions : public Win32StackFrameUnwinder::UnwindFunctions {
+ public:
+ TestUnwindFunctions();
+
+ PRUNTIME_FUNCTION LookupFunctionEntry(DWORD64 program_counter,
+ PDWORD64 image_base) override;
+ void VirtualUnwind(DWORD64 image_base,
+ DWORD64 program_counter,
+ PRUNTIME_FUNCTION runtime_function,
+ CONTEXT* context) override;
+
+ void SetNoUnwindInfoForNextFrame();
+ void SetImageBaseForNextFrame(DWORD64 image_base);
+
+ private:
+ enum { kRuntimeFunctionPointerIncrement = 1, kImageBaseIncrement = 1 << 20 };
+
+ static const PRUNTIME_FUNCTION kNonNullRuntimeFunctionPointer;
+
+ DWORD64 supplied_program_counter_;
+ DWORD64 custom_image_base_;
+ DWORD64 next_image_base_;
+ bool next_lookup_returns_null_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestUnwindFunctions);
+};
+
+// This value is opaque to Win32StackFrameUnwinder.
+const PRUNTIME_FUNCTION TestUnwindFunctions::kNonNullRuntimeFunctionPointer =
+ reinterpret_cast<PRUNTIME_FUNCTION>(8);
+
+TestUnwindFunctions::TestUnwindFunctions()
+ : supplied_program_counter_(0),
+ custom_image_base_(0),
+ next_image_base_(kImageBaseIncrement),
+ next_lookup_returns_null_(false) {
+}
+
+PRUNTIME_FUNCTION TestUnwindFunctions::LookupFunctionEntry(
+ DWORD64 program_counter,
+ PDWORD64 image_base) {
+ supplied_program_counter_ = program_counter;
+ if (custom_image_base_) {
+ *image_base = custom_image_base_;
+ custom_image_base_ = 0;
+ } else {
+ *image_base = next_image_base_;
+ next_image_base_ += kImageBaseIncrement;
+ }
+ if (next_lookup_returns_null_) {
+ next_lookup_returns_null_ = false;
+ return nullptr;
+ }
+
+ return kNonNullRuntimeFunctionPointer;
+}
+
+void TestUnwindFunctions::VirtualUnwind(DWORD64 image_base,
+ DWORD64 program_counter,
+ PRUNTIME_FUNCTION runtime_function,
+ CONTEXT* context) {
+ EXPECT_EQ(next_image_base_ - kImageBaseIncrement, image_base);
+ EXPECT_EQ(supplied_program_counter_, program_counter);
+ // This function should only be called when LookupFunctionEntry returns a
+ // non-null value.
+ EXPECT_EQ(kNonNullRuntimeFunctionPointer, runtime_function);
+}
+
+void TestUnwindFunctions::SetNoUnwindInfoForNextFrame() {
+ next_lookup_returns_null_ = true;
+}
+
+void TestUnwindFunctions::SetImageBaseForNextFrame(DWORD64 image_base) {
+ next_image_base_ = image_base;
+}
+
+} // namespace
+
+class Win32StackFrameUnwinderTest : public testing::Test {
+ protected:
+ Win32StackFrameUnwinderTest() {}
+
+ // This exists so that Win32StackFrameUnwinder's constructor can be private
+ // with a single friend declaration of this test fixture.
+ scoped_ptr<Win32StackFrameUnwinder> CreateUnwinder();
+
+ TestUnwindFunctions unwind_functions_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Win32StackFrameUnwinderTest);
+};
+
+scoped_ptr<Win32StackFrameUnwinder>
+Win32StackFrameUnwinderTest::CreateUnwinder() {
+ return make_scoped_ptr(new Win32StackFrameUnwinder(&unwind_functions_));
+}
+
+// Checks the case where all frames have unwind information.
+TEST_F(Win32StackFrameUnwinderTest, FramesWithUnwindInfo) {
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+}
+
+// Checks that the CONTEXT's stack pointer gets popped when the top frame has no
+// unwind information.
+TEST_F(Win32StackFrameUnwinderTest, FrameAtTopWithoutUnwindInfo) {
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ const DWORD64 original_rsp = 128;
+ context.Rsp = original_rsp;
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ EXPECT_EQ(original_rsp, context.Rip);
+ EXPECT_EQ(original_rsp + 8, context.Rsp);
+
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+}
+
+// Checks that a frame below the top of the stack with missing unwind info
+// results in blacklisting the module.
+TEST_F(Win32StackFrameUnwinderTest, BlacklistedModule) {
+ const DWORD64 image_base_for_module_with_bad_function = 1024;
+ {
+ // First stack, with a bad function below the top of the stack.
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_module_with_bad_function);
+ EXPECT_FALSE(unwinder->TryUnwind(&context));
+ }
+
+ {
+ // Second stack; check that a function at the top of the stack without
+ // unwind info from the previously-seen module is blacklisted.
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_module_with_bad_function);
+ EXPECT_FALSE(unwinder->TryUnwind(&context));
+ }
+
+ {
+ // Third stack; check that a function at the top of the stack *with* unwind
+ // info from the previously-seen module is not blacklisted. Then check that
+ // functions below the top of the stack with unwind info are not
+ // blacklisted, regardless of whether they are in the previously-seen
+ // module.
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_module_with_bad_function);
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_module_with_bad_function);
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ }
+
+ {
+ // Fourth stack; check that a function at the top of the stack without
+ // unwind info and not from the previously-seen module is not
+ // blacklisted. Then check that functions below the top of the stack with
+ // unwind info are not blacklisted, regardless of whether they are in the
+ // previously-seen module.
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_module_with_bad_function);
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ }
+}
+
+// Checks that a frame below the top of the stack with missing unwind info does
+// not result in blacklisting the module if the first frame also was missing
+// unwind info. This ensures we don't blacklist an innocent module because the
+// first frame was bad but we didn't know it at the time.
+TEST_F(Win32StackFrameUnwinderTest, ModuleFromQuestionableFrameNotBlacklisted) {
+ const DWORD64 image_base_for_questionable_module = 2048;
+ {
+ // First stack, with both the first and second frames missing unwind info.
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_questionable_module);
+ EXPECT_FALSE(unwinder->TryUnwind(&context));
+ }
+
+ {
+ // Second stack; check that the questionable module was not blacklisted.
+ scoped_ptr<Win32StackFrameUnwinder> unwinder = CreateUnwinder();
+ CONTEXT context = {0};
+ unwind_functions_.SetNoUnwindInfoForNextFrame();
+ unwind_functions_.SetImageBaseForNextFrame(
+ image_base_for_questionable_module);
+ EXPECT_TRUE(unwinder->TryUnwind(&context));
+ }
+}
+
+} // namespace base
diff --git a/chromium/base/security_unittest.cc b/chromium/base/security_unittest.cc
index 07ba6f5a0f2..a9c73f7c592 100644
--- a/chromium/base/security_unittest.cc
+++ b/chromium/base/security_unittest.cc
@@ -23,47 +23,11 @@
#include <unistd.h>
#endif
-#if defined(OS_WIN)
-#include <new.h>
-#endif
-
using std::nothrow;
using std::numeric_limits;
namespace {
-#if defined(OS_WIN)
-// This is a permitted size but exhausts memory pretty quickly.
-const size_t kLargePermittedAllocation = 0x7FFFE000;
-
-int OnNoMemory(size_t) {
- _exit(1);
-}
-
-void ExhaustMemoryWithMalloc() {
- for (;;) {
- // Without the |volatile|, clang optimizes away the allocation.
- void* volatile buf = malloc(kLargePermittedAllocation);
- if (!buf)
- break;
- }
-}
-
-void ExhaustMemoryWithRealloc() {
- size_t size = kLargePermittedAllocation;
- void* buf = malloc(size);
- if (!buf)
- return;
- for (;;) {
- size += kLargePermittedAllocation;
- void* new_buf = realloc(buf, size);
- if (!buf)
- break;
- buf = new_buf;
- }
-}
-#endif
-
// This function acts as a compiler optimization barrier. We use it to
// prevent the compiler from making an expression a compile-time constant.
// We also use it so that the compiler doesn't discard certain return values
@@ -92,144 +56,16 @@ NOINLINE Type HideValueFromCompiler(volatile Type value) {
#define MALLOC_OVERFLOW_TEST(function) DISABLED_##function
#endif
-// TODO(jln): switch to std::numeric_limits<int>::max() when we switch to
-// C++11.
-const size_t kTooBigAllocSize = INT_MAX;
-
+#if defined(OS_LINUX) && defined(__x86_64__)
// Detect runtime TCMalloc bypasses.
bool IsTcMallocBypassed() {
-#if defined(OS_LINUX)
// This should detect a TCMalloc bypass from Valgrind.
char* g_slice = getenv("G_SLICE");
if (g_slice && !strcmp(g_slice, "always-malloc"))
return true;
-#endif
return false;
}
-
-bool CallocDiesOnOOM() {
-// The sanitizers' calloc dies on OOM instead of returning NULL.
-// The wrapper function in base/process_util_linux.cc that is used when we
-// compile without TCMalloc will just die on OOM instead of returning NULL.
-#if defined(ADDRESS_SANITIZER) || \
- defined(MEMORY_SANITIZER) || \
- defined(THREAD_SANITIZER) || \
- (defined(OS_LINUX) && defined(NO_TCMALLOC))
- return true;
-#else
- return false;
#endif
-}
-
-// Fake test that allow to know the state of TCMalloc by looking at bots.
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(IsTCMallocDynamicallyBypassed)) {
- printf("Malloc is dynamically bypassed: %s\n",
- IsTcMallocBypassed() ? "yes." : "no.");
-}
-
-// The MemoryAllocationRestrictions* tests test that we can not allocate a
-// memory range that cannot be indexed via an int. This is used to mitigate
-// vulnerabilities in libraries that use int instead of size_t. See
-// crbug.com/169327.
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsMalloc)) {
- if (!IsTcMallocBypassed()) {
- scoped_ptr<char, base::FreeDeleter> ptr(static_cast<char*>(
- HideValueFromCompiler(malloc(kTooBigAllocSize))));
- ASSERT_TRUE(!ptr);
- }
-}
-
-#if defined(GTEST_HAS_DEATH_TEST) && defined(OS_WIN)
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationMallocDeathTest)) {
- _set_new_handler(&OnNoMemory);
- _set_new_mode(1);
- {
- scoped_ptr<char, base::FreeDeleter> ptr;
- EXPECT_DEATH(ptr.reset(static_cast<char*>(
- HideValueFromCompiler(malloc(kTooBigAllocSize)))),
- "");
- ASSERT_TRUE(!ptr);
- }
- _set_new_handler(NULL);
- _set_new_mode(0);
-}
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationExhaustDeathTest)) {
- _set_new_handler(&OnNoMemory);
- _set_new_mode(1);
- {
- ASSERT_DEATH(ExhaustMemoryWithMalloc(), "");
- }
- _set_new_handler(NULL);
- _set_new_mode(0);
-}
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryReallocationExhaustDeathTest)) {
- _set_new_handler(&OnNoMemory);
- _set_new_mode(1);
- {
- ASSERT_DEATH(ExhaustMemoryWithRealloc(), "");
- }
- _set_new_handler(NULL);
- _set_new_mode(0);
-}
-#endif
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsCalloc)) {
- if (!IsTcMallocBypassed()) {
- scoped_ptr<char, base::FreeDeleter> ptr(static_cast<char*>(
- HideValueFromCompiler(calloc(kTooBigAllocSize, 1))));
- ASSERT_TRUE(!ptr);
- }
-}
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsRealloc)) {
- if (!IsTcMallocBypassed()) {
- char* orig_ptr = static_cast<char*>(malloc(1));
- ASSERT_TRUE(orig_ptr);
- scoped_ptr<char, base::FreeDeleter> ptr(static_cast<char*>(
- HideValueFromCompiler(realloc(orig_ptr, kTooBigAllocSize))));
- ASSERT_TRUE(!ptr);
- // If realloc() did not succeed, we need to free orig_ptr.
- free(orig_ptr);
- }
-}
-
-typedef struct {
- char large_array[kTooBigAllocSize];
-} VeryLargeStruct;
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsNew)) {
- if (!IsTcMallocBypassed()) {
- scoped_ptr<VeryLargeStruct> ptr(
- HideValueFromCompiler(new (nothrow) VeryLargeStruct));
- ASSERT_TRUE(!ptr);
- }
-}
-
-#if defined(GTEST_HAS_DEATH_TEST) && defined(OS_WIN)
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationNewDeathTest)) {
- _set_new_handler(&OnNoMemory);
- {
- scoped_ptr<VeryLargeStruct> ptr;
- EXPECT_DEATH(
- ptr.reset(HideValueFromCompiler(new (nothrow) VeryLargeStruct)), "");
- ASSERT_TRUE(!ptr);
- }
- _set_new_handler(NULL);
-}
-#endif
-
-TEST(SecurityTest, MALLOC_OVERFLOW_TEST(MemoryAllocationRestrictionsNewArray)) {
- if (!IsTcMallocBypassed()) {
- scoped_ptr<char[]> ptr(
- HideValueFromCompiler(new (nothrow) char[kTooBigAllocSize]));
- ASSERT_TRUE(!ptr);
- }
-}
-
-// The tests bellow check for overflows in new[] and calloc().
// There are platforms where these tests are known to fail. We would like to
// be able to easily check the status on the bots, but marking tests as
@@ -287,34 +123,6 @@ TEST(SecurityTest, MAYBE_NewOverflow) {
#endif // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
}
-// Call calloc(), eventually free the memory and return whether or not
-// calloc() did succeed.
-bool CallocReturnsNull(size_t nmemb, size_t size) {
- scoped_ptr<char, base::FreeDeleter> array_pointer(
- static_cast<char*>(calloc(nmemb, size)));
- // We need the call to HideValueFromCompiler(): we have seen LLVM
- // optimize away the call to calloc() entirely and assume the pointer to not
- // be NULL.
- return HideValueFromCompiler(array_pointer.get()) == NULL;
-}
-
-// Test if calloc() can overflow.
-TEST(SecurityTest, CallocOverflow) {
- const size_t kArraySize = 4096;
- const size_t kMaxSizeT = numeric_limits<size_t>::max();
- const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
- if (!CallocDiesOnOOM()) {
- EXPECT_TRUE(CallocReturnsNull(kArraySize, kArraySize2));
- EXPECT_TRUE(CallocReturnsNull(kArraySize2, kArraySize));
- } else {
- // It's also ok for calloc to just terminate the process.
-#if defined(GTEST_HAS_DEATH_TEST)
- EXPECT_DEATH(CallocReturnsNull(kArraySize, kArraySize2), "");
- EXPECT_DEATH(CallocReturnsNull(kArraySize2, kArraySize), "");
-#endif // GTEST_HAS_DEATH_TEST
- }
-}
-
#if defined(OS_LINUX) && defined(__x86_64__)
// Check if ptr1 and ptr2 are separated by less than size chars.
bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc
index b6b65d2a3bf..0f4f3813321 100644
--- a/chromium/base/strings/string_number_conversions.cc
+++ b/chromium/base/strings/string_number_conversions.cc
@@ -12,6 +12,8 @@
#include <limits>
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
#include "base/scoped_clear_errno.h"
#include "base/strings/utf_string_conversions.h"
#include "base/third_party/dmg_fp/dmg_fp.h"
@@ -20,82 +22,37 @@ namespace base {
namespace {
-template <typename STR, typename INT, typename UINT, bool NEG>
+template <typename STR, typename INT>
struct IntToStringT {
- // This is to avoid a compiler warning about unary minus on unsigned type.
- // For example, say you had the following code:
- // template <typename INT>
- // INT abs(INT value) { return value < 0 ? -value : value; }
- // Even though if INT is unsigned, it's impossible for value < 0, so the
- // unary minus will never be taken, the compiler will still generate a
- // warning. We do a little specialization dance...
- template <typename INT2, typename UINT2, bool NEG2>
- struct ToUnsignedT {};
-
- template <typename INT2, typename UINT2>
- struct ToUnsignedT<INT2, UINT2, false> {
- static UINT2 ToUnsigned(INT2 value) {
- return static_cast<UINT2>(value);
- }
- };
-
- template <typename INT2, typename UINT2>
- struct ToUnsignedT<INT2, UINT2, true> {
- static UINT2 ToUnsigned(INT2 value) {
- if (value >= 0) {
- return value;
- } else {
- // Avoid integer overflow when negating INT_MIN.
- return static_cast<UINT2>(-(value + 1)) + 1;
- }
- }
- };
-
- // This set of templates is very similar to the above templates, but
- // for testing whether an integer is negative.
- template <typename INT2, bool NEG2>
- struct TestNegT {};
- template <typename INT2>
- struct TestNegT<INT2, false> {
- static bool TestNeg(INT2 value) {
- // value is unsigned, and can never be negative.
- return false;
- }
- };
- template <typename INT2>
- struct TestNegT<INT2, true> {
- static bool TestNeg(INT2 value) {
- return value < 0;
- }
- };
-
static STR IntToString(INT value) {
// log10(2) ~= 0.3 bytes needed per bit or per byte log10(2**8) ~= 2.4.
// So round up to allocate 3 output characters per byte, plus 1 for '-'.
- const int kOutputBufSize = 3 * sizeof(INT) + 1;
+ const size_t kOutputBufSize =
+ 3 * sizeof(INT) + std::numeric_limits<INT>::is_signed;
- // Allocate the whole string right away, we will right back to front, and
+ // Create the string in a temporary buffer, write it back to front, and
// then return the substr of what we ended up using.
- STR outbuf(kOutputBufSize, 0);
+ using CHR = typename STR::value_type;
+ CHR outbuf[kOutputBufSize];
- bool is_neg = TestNegT<INT, NEG>::TestNeg(value);
- // Even though is_neg will never be true when INT is parameterized as
- // unsigned, even the presence of the unary operation causes a warning.
- UINT res = ToUnsignedT<INT, UINT, NEG>::ToUnsigned(value);
+ // The ValueOrDie call below can never fail, because UnsignedAbs is valid
+ // for all valid inputs.
+ auto res = CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
- typename STR::iterator it(outbuf.end());
+ CHR* end = outbuf + kOutputBufSize;
+ CHR* i = end;
do {
- --it;
- DCHECK(it != outbuf.begin());
- *it = static_cast<typename STR::value_type>((res % 10) + '0');
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>((res % 10) + '0');
res /= 10;
} while (res != 0);
- if (is_neg) {
- --it;
- DCHECK(it != outbuf.begin());
- *it = static_cast<typename STR::value_type>('-');
+ if (IsValueNegative(value)) {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>('-');
}
- return STR(it, outbuf.end());
+ return STR(i, end);
}
};
@@ -373,47 +330,43 @@ bool String16ToIntImpl(const StringPiece16& input, VALUE* output) {
} // namespace
std::string IntToString(int value) {
- return IntToStringT<std::string, int, unsigned int, true>::
- IntToString(value);
+ return IntToStringT<std::string, int>::IntToString(value);
}
string16 IntToString16(int value) {
- return IntToStringT<string16, int, unsigned int, true>::
- IntToString(value);
+ return IntToStringT<string16, int>::IntToString(value);
}
std::string UintToString(unsigned int value) {
- return IntToStringT<std::string, unsigned int, unsigned int, false>::
- IntToString(value);
+ return IntToStringT<std::string, unsigned int>::IntToString(value);
}
string16 UintToString16(unsigned int value) {
- return IntToStringT<string16, unsigned int, unsigned int, false>::
- IntToString(value);
+ return IntToStringT<string16, unsigned int>::IntToString(value);
}
std::string Int64ToString(int64 value) {
- return IntToStringT<std::string, int64, uint64, true>::IntToString(value);
+ return IntToStringT<std::string, int64>::IntToString(value);
}
string16 Int64ToString16(int64 value) {
- return IntToStringT<string16, int64, uint64, true>::IntToString(value);
+ return IntToStringT<string16, int64>::IntToString(value);
}
std::string Uint64ToString(uint64 value) {
- return IntToStringT<std::string, uint64, uint64, false>::IntToString(value);
+ return IntToStringT<std::string, uint64>::IntToString(value);
}
string16 Uint64ToString16(uint64 value) {
- return IntToStringT<string16, uint64, uint64, false>::IntToString(value);
+ return IntToStringT<string16, uint64>::IntToString(value);
}
std::string SizeTToString(size_t value) {
- return IntToStringT<std::string, size_t, size_t, false>::IntToString(value);
+ return IntToStringT<std::string, size_t>::IntToString(value);
}
string16 SizeTToString16(size_t value) {
- return IntToStringT<string16, size_t, size_t, false>::IntToString(value);
+ return IntToStringT<string16, size_t>::IntToString(value);
}
std::string DoubleToString(double value) {
diff --git a/chromium/base/strings/string_number_conversions.h b/chromium/base/strings/string_number_conversions.h
index 050e627a1a1..cf1c3b467dd 100644
--- a/chromium/base/strings/string_number_conversions.h
+++ b/chromium/base/strings/string_number_conversions.h
@@ -64,6 +64,8 @@ BASE_EXPORT std::string DoubleToString(double value);
// - No characters parseable as a number at the beginning of the string.
// |*output| will be set to 0.
// - Empty string. |*output| will be set to 0.
+// WARNING: Will write to |output| even when returning false.
+// Read the comments above carefully.
BASE_EXPORT bool StringToInt(const StringPiece& input, int* output);
BASE_EXPORT bool StringToInt(const StringPiece16& input, int* output);
@@ -81,10 +83,12 @@ BASE_EXPORT bool StringToSizeT(const StringPiece16& input, size_t* output);
// For floating-point conversions, only conversions of input strings in decimal
// form are defined to work. Behavior with strings representing floating-point
-// numbers in hexadecimal, and strings representing non-fininte values (such as
+// numbers in hexadecimal, and strings representing non-finite values (such as
// NaN and inf) is undefined. Otherwise, these behave the same as the integral
// variants. This expects the input string to NOT be specific to the locale.
// If your input is locale specific, use ICU to read the number.
+// WARNING: Will write to |output| even when returning false.
+// Read the comments here and above StringToInt() carefully.
BASE_EXPORT bool StringToDouble(const std::string& input, double* output);
// Hex encoding ----------------------------------------------------------------
diff --git a/chromium/base/strings/string_piece.cc b/chromium/base/strings/string_piece.cc
index 4c7f1122f2d..99975725afc 100644
--- a/chromium/base/strings/string_piece.cc
+++ b/chromium/base/strings/string_piece.cc
@@ -8,6 +8,8 @@
#include <algorithm>
#include <ostream>
+#include "base/logging.h"
+
namespace base {
namespace {
@@ -433,5 +435,16 @@ StringPiece16 substr(const StringPiece16& self,
return substrT(self, pos, n);
}
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+void AssertIteratorsInOrder(std::string::const_iterator begin,
+ std::string::const_iterator end) {
+ DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+}
+void AssertIteratorsInOrder(string16::const_iterator begin,
+ string16::const_iterator end) {
+ DCHECK(begin <= end) << "StringPiece iterators swapped or invalid.";
+}
+#endif
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/strings/string_piece.h b/chromium/base/strings/string_piece.h
index a83b7d8f661..c1af175406c 100644
--- a/chromium/base/strings/string_piece.h
+++ b/chromium/base/strings/string_piece.h
@@ -143,6 +143,14 @@ BASE_EXPORT StringPiece16 substr(const StringPiece16& self,
size_t pos,
size_t n);
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+// Asserts that begin <= end to catch some errors with iterator usage.
+BASE_EXPORT void AssertIteratorsInOrder(std::string::const_iterator begin,
+ std::string::const_iterator end);
+BASE_EXPORT void AssertIteratorsInOrder(string16::const_iterator begin,
+ string16::const_iterator end);
+#endif
+
} // namespace internal
// BasicStringPiece ------------------------------------------------------------
@@ -180,9 +188,18 @@ template <typename STRING_TYPE> class BasicStringPiece {
BasicStringPiece(const value_type* offset, size_type len)
: ptr_(offset), length_(len) {}
BasicStringPiece(const typename STRING_TYPE::const_iterator& begin,
- const typename STRING_TYPE::const_iterator& end)
- : ptr_((end > begin) ? &(*begin) : NULL),
- length_((end > begin) ? (size_type)(end - begin) : 0) {}
+ const typename STRING_TYPE::const_iterator& end) {
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+ // This assertion is done out-of-line to avoid bringing in logging.h and
+ // instantiating logging macros for every instantiation.
+ internal::AssertIteratorsInOrder(begin, end);
+#endif
+ length_ = static_cast<size_t>(std::distance(begin, end));
+
+ // The length test before assignment is to avoid dereferencing an iterator
+ // that may point to the end() of a string.
+ ptr_ = length_ > 0 ? &*begin : nullptr;
+ }
// data() may return a pointer to a buffer with embedded NULs, and the
// returned buffer may or may not be null terminated. Therefore it is
diff --git a/chromium/base/strings/string_split.cc b/chromium/base/strings/string_split.cc
index cc62a6f4b47..4253e2f8f86 100644
--- a/chromium/base/strings/string_split.cc
+++ b/chromium/base/strings/string_split.cc
@@ -99,52 +99,57 @@ static std::vector<OutputStringType> SplitStringT(
return result;
}
-bool SplitStringIntoKeyValue(const std::string& line,
- char key_value_delimiter,
- std::string* key,
- std::string* value) {
- key->clear();
- value->clear();
+bool AppendStringKeyValue(StringPiece input,
+ char delimiter,
+ StringPairs* result) {
+ // Always append a new item regardless of success (it might be empty). The
+ // below code will copy the strings directly into the result pair.
+ result->resize(result->size() + 1);
+ auto& result_pair = result->back();
// Find the delimiter.
- size_t end_key_pos = line.find_first_of(key_value_delimiter);
+ size_t end_key_pos = input.find_first_of(delimiter);
if (end_key_pos == std::string::npos) {
- DVLOG(1) << "cannot find delimiter in: " << line;
- return false; // no delimiter
+ DVLOG(1) << "cannot find delimiter in: " << input;
+ return false; // No delimiter.
}
- key->assign(line, 0, end_key_pos);
+ input.substr(0, end_key_pos).CopyToString(&result_pair.first);
// Find the value string.
- std::string remains(line, end_key_pos, line.size() - end_key_pos);
- size_t begin_value_pos = remains.find_first_not_of(key_value_delimiter);
- if (begin_value_pos == std::string::npos) {
- DVLOG(1) << "cannot parse value from line: " << line;
- return false; // no value
+ StringPiece remains = input.substr(end_key_pos, input.size() - end_key_pos);
+ size_t begin_value_pos = remains.find_first_not_of(delimiter);
+ if (begin_value_pos == StringPiece::npos) {
+ DVLOG(1) << "cannot parse value from input: " << input;
+ return false; // No value.
}
- value->assign(remains, begin_value_pos, remains.size() - begin_value_pos);
+ remains.substr(begin_value_pos, remains.size() - begin_value_pos)
+ .CopyToString(&result_pair.second);
+
return true;
}
-template <typename STR>
-void SplitStringUsingSubstrT(const STR& str,
- const STR& s,
- std::vector<STR>* r) {
- r->clear();
- typename STR::size_type begin_index = 0;
+template <typename Str>
+void SplitStringUsingSubstrT(BasicStringPiece<Str> input,
+ BasicStringPiece<Str> delimiter,
+ std::vector<Str>* result) {
+ using Piece = BasicStringPiece<Str>;
+ using size_type = typename Piece::size_type;
+
+ result->clear();
+ size_type begin_index = 0;
while (true) {
- const typename STR::size_type end_index = str.find(s, begin_index);
- if (end_index == STR::npos) {
- const STR term = str.substr(begin_index);
- STR tmp;
- TrimWhitespace(term, TRIM_ALL, &tmp);
- r->push_back(tmp);
+ size_type end_index = input.find(delimiter, begin_index);
+ if (end_index == Piece::npos) {
+ // No delimiter, use the rest of the string.
+ Piece term = TrimString(input.substr(begin_index),
+ WhitespaceForType<Str>(), TRIM_ALL);
+ result->push_back(term.as_string());
return;
}
- const STR term = str.substr(begin_index, end_index - begin_index);
- STR tmp;
- TrimWhitespace(term, TRIM_ALL, &tmp);
- r->push_back(tmp);
- begin_index = end_index + s.size();
+ Piece term = TrimString(input.substr(begin_index, end_index - begin_index),
+ WhitespaceForType<Str>(), TRIM_ALL);
+ result->push_back(term.as_string());
+ begin_index = end_index + delimiter.size();
}
}
@@ -198,110 +203,38 @@ std::vector<StringPiece16> SplitStringPiece(StringPiece16 input,
input, separators, whitespace, result_type);
}
-void SplitString(const string16& str,
- char16 c,
- std::vector<string16>* result) {
- DCHECK(CBU16_IS_SINGLE(c));
- *result = SplitStringT<string16, string16, char16>(
- str, c, TRIM_WHITESPACE, SPLIT_WANT_ALL);
-
- // Backward-compat hack: The old SplitString implementation would keep
- // empty substrings, for example:
- // "a,,b" -> ["a", "", "b"]
- // "a, ,b" -> ["a", "", "b"]
- // which the current code also does. But the old one would discard them when
- // the only result was that empty string:
- // " " -> []
- // In the latter case, our new code will give [""]
- if (result->size() == 1 && (*result)[0].empty())
- result->clear();
-}
-
-void SplitString(const std::string& str,
- char c,
- std::vector<std::string>* result) {
-#if CHAR_MIN < 0
- DCHECK_GE(c, 0);
-#endif
- DCHECK_LT(c, 0x7F);
- *result = SplitStringT<std::string, std::string, char>(
- str, c, TRIM_WHITESPACE, SPLIT_WANT_ALL);
-
- // Backward-compat hack, see above.
- if (result->size() == 1 && (*result)[0].empty())
- result->clear();
-}
-
-bool SplitStringIntoKeyValuePairs(const std::string& line,
+bool SplitStringIntoKeyValuePairs(StringPiece input,
char key_value_delimiter,
char key_value_pair_delimiter,
StringPairs* key_value_pairs) {
key_value_pairs->clear();
- std::vector<std::string> pairs;
- SplitString(line, key_value_pair_delimiter, &pairs);
+ std::vector<StringPiece> pairs = SplitStringPiece(
+ input, std::string(1, key_value_pair_delimiter),
+ TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+ key_value_pairs->reserve(pairs.size());
bool success = true;
- for (size_t i = 0; i < pairs.size(); ++i) {
- // Don't add empty pairs into the result.
- if (pairs[i].empty())
- continue;
-
- std::string key;
- std::string value;
- if (!SplitStringIntoKeyValue(pairs[i], key_value_delimiter, &key, &value)) {
+ for (const StringPiece& pair : pairs) {
+ if (!AppendStringKeyValue(pair, key_value_delimiter, key_value_pairs)) {
// Don't return here, to allow for pairs without associated
// value or key; just record that the split failed.
success = false;
}
- key_value_pairs->push_back(make_pair(key, value));
}
return success;
}
-void SplitStringUsingSubstr(const string16& str,
- const string16& s,
- std::vector<string16>* r) {
- SplitStringUsingSubstrT(str, s, r);
+void SplitStringUsingSubstr(StringPiece16 input,
+ StringPiece16 delimiter,
+ std::vector<string16>* result) {
+ SplitStringUsingSubstrT(input, delimiter, result);
}
-void SplitStringUsingSubstr(const std::string& str,
- const std::string& s,
- std::vector<std::string>* r) {
- SplitStringUsingSubstrT(str, s, r);
-}
-
-void SplitStringDontTrim(StringPiece16 str,
- char16 c,
- std::vector<string16>* result) {
- DCHECK(CBU16_IS_SINGLE(c));
- *result = SplitStringT<string16, string16, char16>(
- str, c, KEEP_WHITESPACE, SPLIT_WANT_ALL);
-}
-
-void SplitStringDontTrim(StringPiece str,
- char c,
- std::vector<std::string>* result) {
-#if CHAR_MIN < 0
- DCHECK_GE(c, 0);
-#endif
- DCHECK_LT(c, 0x7F);
- *result = SplitStringT<std::string, std::string, char>(
- str, c, KEEP_WHITESPACE, SPLIT_WANT_ALL);
-}
-
-void SplitStringAlongWhitespace(const string16& str,
- std::vector<string16>* result) {
- *result = SplitStringT<string16, string16, StringPiece16>(
- str, StringPiece16(kWhitespaceASCIIAs16),
- TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
-}
-
-void SplitStringAlongWhitespace(const std::string& str,
- std::vector<std::string>* result) {
- *result = SplitStringT<std::string, std::string, StringPiece>(
- str, StringPiece(kWhitespaceASCII),
- TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+void SplitStringUsingSubstr(StringPiece input,
+ StringPiece delimiter,
+ std::vector<std::string>* result) {
+ SplitStringUsingSubstrT(input, delimiter, result);
}
} // namespace base
diff --git a/chromium/base/strings/string_split.h b/chromium/base/strings/string_split.h
index dc4108d327f..2a0c79561b8 100644
--- a/chromium/base/strings/string_split.h
+++ b/chromium/base/strings/string_split.h
@@ -83,7 +83,7 @@ using StringPairs = std::vector<std::pair<std::string, std::string>>;
// removes whitespace leading each key and trailing each value. Returns true
// only if each pair has a non-empty key and value. |key_value_pairs| will
// include ("","") pairs for entries without |key_value_delimiter|.
-BASE_EXPORT bool SplitStringIntoKeyValuePairs(const std::string& line,
+BASE_EXPORT bool SplitStringIntoKeyValuePairs(StringPiece input,
char key_value_delimiter,
char key_value_pair_delimiter,
StringPairs* key_value_pairs);
@@ -94,64 +94,12 @@ BASE_EXPORT bool SplitStringIntoKeyValuePairs(const std::string& line,
// TODO(brettw) this should probably be changed and expanded to provide a
// mirror of the SplitString[Piece] API above, just with the different
// delimiter handling.
-BASE_EXPORT void SplitStringUsingSubstr(const string16& str,
- const string16& s,
- std::vector<string16>* r);
-BASE_EXPORT void SplitStringUsingSubstr(const std::string& str,
- const std::string& s,
- std::vector<std::string>* r);
-
-// -----------------------------------------------------------------------------
-// Backwards-compat wrappers
-//
-// New code should use one of the more general variants above.
-// TODO(brettw) remove these and convert to the versions above.
-
-// Splits |str| into a vector of strings delimited by |c|, placing the results
-// in |r|. If several instances of |c| are contiguous, or if |str| begins with
-// or ends with |c|, then an empty string is inserted.
-//
-// Every substring is trimmed of any leading or trailing white space.
-// NOTE: |c| must be in BMP (Basic Multilingual Plane)
-BASE_EXPORT void SplitString(const string16& str,
- char16 c,
- std::vector<string16>* r);
-
-// |str| should not be in a multi-byte encoding like Shift-JIS or GBK in which
-// the trailing byte of a multi-byte character can be in the ASCII range.
-// UTF-8, and other single/multi-byte ASCII-compatible encodings are OK.
-// Note: |c| must be in the ASCII range.
-BASE_EXPORT void SplitString(const std::string& str,
- char c,
- std::vector<std::string>* r);
-
-// The same as SplitString, but don't trim white space.
-// NOTE: |c| must be in BMP (Basic Multilingual Plane)
-BASE_EXPORT void SplitStringDontTrim(StringPiece16 str,
- char16 c,
- std::vector<string16>* r);
-// |str| should not be in a multi-byte encoding like Shift-JIS or GBK in which
-// the trailing byte of a multi-byte character can be in the ASCII range.
-// UTF-8, and other single/multi-byte ASCII-compatible encodings are OK.
-// Note: |c| must be in the ASCII range.
-BASE_EXPORT void SplitStringDontTrim(StringPiece str,
- char c,
- std::vector<std::string>* result);
-
-// WARNING: this uses whitespace as defined by the HTML5 spec (ASCII whitespace
-// only).
-//
-// The difference between this and calling SplitString with the whitespace
-// characters as separators is the treatment of the first element when the
-// string starts with whitespace.
-//
-// Input SplitString SplitStringAlongWhitespace
-// --------------------------------------------------------
-// " a " "", "a" "a"
-BASE_EXPORT void SplitStringAlongWhitespace(const string16& str,
- std::vector<string16>* result);
-BASE_EXPORT void SplitStringAlongWhitespace(const std::string& str,
- std::vector<std::string>* result);
+BASE_EXPORT void SplitStringUsingSubstr(StringPiece16 input,
+ StringPiece16 delimiter,
+ std::vector<string16>* result);
+BASE_EXPORT void SplitStringUsingSubstr(StringPiece input,
+ StringPiece delimiter,
+ std::vector<std::string>* result);
} // namespace base
diff --git a/chromium/base/strings/string_split_unittest.cc b/chromium/base/strings/string_split_unittest.cc
index c745ab58f08..0416776b54c 100644
--- a/chromium/base/strings/string_split_unittest.cc
+++ b/chromium/base/strings/string_split_unittest.cc
@@ -4,6 +4,7 @@
#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -12,23 +13,6 @@ using ::testing::ElementsAre;
namespace base {
-namespace {
-
-#if !defined(WCHAR_T_IS_UTF16)
-// Overload SplitString with a wide-char version to make it easier to
-// test the string16 version with wide character literals.
-void SplitString(const std::wstring& str,
- wchar_t c,
- std::vector<std::wstring>* result) {
- std::vector<string16> result16;
- SplitString(WideToUTF16(str), c, &result16);
- for (size_t i = 0; i < result16.size(); ++i)
- result->push_back(UTF16ToWide(result16[i]));
-}
-#endif
-
-} // anonymous namespace
-
class SplitStringIntoKeyValuePairsTest : public testing::Test {
protected:
base::StringPairs kv_pairs;
@@ -243,77 +227,6 @@ TEST(StringUtilTest, SplitString_WhitespaceAndResultType) {
ASSERT_TRUE(r.empty());
}
-TEST(StringUtilTest, SplitString_Legacy) {
- std::vector<std::wstring> r;
-
- SplitString(std::wstring(), L',', &r);
- EXPECT_EQ(0U, r.size());
- r.clear();
-
- SplitString(L"a,b,c", L',', &r);
- ASSERT_EQ(3U, r.size());
- EXPECT_EQ(r[0], L"a");
- EXPECT_EQ(r[1], L"b");
- EXPECT_EQ(r[2], L"c");
- r.clear();
-
- SplitString(L"a, b, c", L',', &r);
- ASSERT_EQ(3U, r.size());
- EXPECT_EQ(r[0], L"a");
- EXPECT_EQ(r[1], L"b");
- EXPECT_EQ(r[2], L"c");
- r.clear();
-
- SplitString(L"a,,c", L',', &r);
- ASSERT_EQ(3U, r.size());
- EXPECT_EQ(r[0], L"a");
- EXPECT_EQ(r[1], L"");
- EXPECT_EQ(r[2], L"c");
- r.clear();
-
- SplitString(L"a, ,c", L',', &r);
- ASSERT_EQ(3U, r.size());
- EXPECT_EQ(r[0], L"a");
- EXPECT_EQ(r[1], L"");
- EXPECT_EQ(r[2], L"c");
- r.clear();
-
- SplitString(L" ", L'*', &r);
- EXPECT_EQ(0U, r.size());
- r.clear();
-
- SplitString(L"foo", L'*', &r);
- ASSERT_EQ(1U, r.size());
- EXPECT_EQ(r[0], L"foo");
- r.clear();
-
- SplitString(L"foo ,", L',', &r);
- ASSERT_EQ(2U, r.size());
- EXPECT_EQ(r[0], L"foo");
- EXPECT_EQ(r[1], L"");
- r.clear();
-
- SplitString(L",", L',', &r);
- ASSERT_EQ(2U, r.size());
- EXPECT_EQ(r[0], L"");
- EXPECT_EQ(r[1], L"");
- r.clear();
-
- SplitString(L"\t\ta\t", L'\t', &r);
- ASSERT_EQ(4U, r.size());
- EXPECT_EQ(r[0], L"");
- EXPECT_EQ(r[1], L"");
- EXPECT_EQ(r[2], L"a");
- EXPECT_EQ(r[3], L"");
- r.clear();
-
- SplitString(L"\ta\t\nb\tcc", L'\n', &r);
- ASSERT_EQ(2U, r.size());
- EXPECT_EQ(r[0], L"a");
- EXPECT_EQ(r[1], L"b\tcc");
- r.clear();
-}
-
TEST(SplitStringUsingSubstrTest, StringWithNoDelimiter) {
std::vector<std::string> results;
SplitStringUsingSubstr("alongwordwithnodelimiter", "DELIMITER", &results);
@@ -352,21 +265,23 @@ TEST(SplitStringUsingSubstrTest, TrailingDelimitersSkipped) {
results, ElementsAre("un", "deux", "trois", "quatre", "", "", ""));
}
-TEST(StringSplitTest, StringSplitDontTrim) {
+TEST(StringSplitTest, StringSplitKeepWhitespace) {
std::vector<std::string> r;
- SplitStringDontTrim(" ", '*', &r);
+ r = SplitString(" ", "*", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
ASSERT_EQ(1U, r.size());
EXPECT_EQ(r[0], " ");
- SplitStringDontTrim("\t \ta\t ", '\t', &r);
+ r = SplitString("\t \ta\t ", "\t", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_ALL);
ASSERT_EQ(4U, r.size());
EXPECT_EQ(r[0], "");
EXPECT_EQ(r[1], " ");
EXPECT_EQ(r[2], "a");
EXPECT_EQ(r[3], " ");
- SplitStringDontTrim("\ta\t\nb\tcc", '\n', &r);
+ r = SplitString("\ta\t\nb\tcc", "\n", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_ALL);
ASSERT_EQ(2U, r.size());
EXPECT_EQ(r[0], "\ta\t");
EXPECT_EQ(r[1], "b\tcc");
@@ -394,8 +309,9 @@ TEST(StringSplitTest, SplitStringAlongWhitespace) {
{ "b\t at", 2, "b", "at" },
};
for (size_t i = 0; i < arraysize(data); ++i) {
- std::vector<std::string> results;
- SplitStringAlongWhitespace(data[i].input, &results);
+ std::vector<std::string> results = base::SplitString(
+ data[i].input, kWhitespaceASCII, base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
ASSERT_EQ(data[i].expected_result_count, results.size());
if (data[i].expected_result_count > 0)
ASSERT_EQ(data[i].output1, results[0]);
diff --git a/chromium/base/strings/string_util.cc b/chromium/base/strings/string_util.cc
index 5b727ce0b4e..d7a1d54c722 100644
--- a/chromium/base/strings/string_util.cc
+++ b/chromium/base/strings/string_util.cc
@@ -27,9 +27,7 @@
#include "base/third_party/icu/icu_utf.h"
#include "build/build_config.h"
-// Remove when this entire file is in the base namespace.
-using base::char16;
-using base::string16;
+namespace base {
namespace {
@@ -80,13 +78,13 @@ template<typename T> inline T* AlignToMachineWord(T* pointer) {
}
template<size_t size, typename CharacterType> struct NonASCIIMask;
-template<> struct NonASCIIMask<4, base::char16> {
+template<> struct NonASCIIMask<4, char16> {
static inline uint32_t value() { return 0xFF80FF80U; }
};
template<> struct NonASCIIMask<4, char> {
static inline uint32_t value() { return 0x80808080U; }
};
-template<> struct NonASCIIMask<8, base::char16> {
+template<> struct NonASCIIMask<8, char16> {
static inline uint64_t value() { return 0xFF80FF80FF80FF80ULL; }
};
template<> struct NonASCIIMask<8, char> {
@@ -101,9 +99,19 @@ template<> struct NonASCIIMask<8, wchar_t> {
};
#endif // WCHAR_T_IS_UTF32
-} // namespace
+// DO NOT USE. http://crbug.com/24917
+//
+// tolower() will given incorrect results for non-ASCII characters. Use the
+// ASCII version, base::i18n::ToLower, or base::i18n::FoldCase. This is here
+// for backwards-compat for StartsWith until such calls can be updated.
+struct CaseInsensitiveCompareDeprecated {
+ public:
+ bool operator()(char16 x, char16 y) const {
+ return tolower(x) == tolower(y);
+ }
+};
-namespace base {
+} // namespace
bool IsWprintfFormatPortable(const wchar_t* format) {
for (const wchar_t* position = format; *position != '\0'; ++position) {
@@ -140,6 +148,91 @@ bool IsWprintfFormatPortable(const wchar_t* format) {
return true;
}
+namespace {
+
+template<typename StringType>
+StringType ToLowerASCIIImpl(BasicStringPiece<StringType> str) {
+ StringType ret;
+ ret.reserve(str.size());
+ for (size_t i = 0; i < str.size(); i++)
+ ret.push_back(ToLowerASCII(str[i]));
+ return ret;
+}
+
+template<typename StringType>
+StringType ToUpperASCIIImpl(BasicStringPiece<StringType> str) {
+ StringType ret;
+ ret.reserve(str.size());
+ for (size_t i = 0; i < str.size(); i++)
+ ret.push_back(ToUpperASCII(str[i]));
+ return ret;
+}
+
+} // namespace
+
+std::string ToLowerASCII(StringPiece str) {
+ return ToLowerASCIIImpl<std::string>(str);
+}
+
+string16 ToLowerASCII(StringPiece16 str) {
+ return ToLowerASCIIImpl<string16>(str);
+}
+
+std::string ToUpperASCII(StringPiece str) {
+ return ToUpperASCIIImpl<std::string>(str);
+}
+
+string16 ToUpperASCII(StringPiece16 str) {
+ return ToUpperASCIIImpl<string16>(str);
+}
+
+template<class StringType>
+int CompareCaseInsensitiveASCIIT(BasicStringPiece<StringType> a,
+ BasicStringPiece<StringType> b) {
+ // Find the first characters that aren't equal and compare them. If the end
+ // of one of the strings is found before a nonequal character, the lengths
+ // of the strings are compared.
+ size_t i = 0;
+ while (i < a.length() && i < b.length()) {
+ typename StringType::value_type lower_a = ToLowerASCII(a[i]);
+ typename StringType::value_type lower_b = ToLowerASCII(b[i]);
+ if (lower_a < lower_b)
+ return -1;
+ if (lower_a > lower_b)
+ return 1;
+ i++;
+ }
+
+ // End of one string hit before finding a different character. Expect the
+ // common case to be "strings equal" at this point so check that first.
+ if (a.length() == b.length())
+ return 0;
+
+ if (a.length() < b.length())
+ return -1;
+ return 1;
+}
+
+int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+ return CompareCaseInsensitiveASCIIT<std::string>(a, b);
+}
+
+int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+ return CompareCaseInsensitiveASCIIT<string16>(a, b);
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) {
+ if (a.length() != b.length())
+ return false;
+ return CompareCaseInsensitiveASCIIT<std::string>(a, b) == 0;
+}
+
+bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
+ if (a.length() != b.length())
+ return false;
+ return CompareCaseInsensitiveASCIIT<string16>(a, b) == 0;
+}
+
const std::string& EmptyString() {
return EmptyStrings::GetInstance()->s;
}
@@ -169,27 +262,27 @@ bool ReplaceCharsT(const STR& input,
}
bool ReplaceChars(const string16& input,
- const base::StringPiece16& replace_chars,
+ const StringPiece16& replace_chars,
const string16& replace_with,
string16* output) {
return ReplaceCharsT(input, replace_chars.as_string(), replace_with, output);
}
bool ReplaceChars(const std::string& input,
- const base::StringPiece& replace_chars,
+ const StringPiece& replace_chars,
const std::string& replace_with,
std::string* output) {
return ReplaceCharsT(input, replace_chars.as_string(), replace_with, output);
}
bool RemoveChars(const string16& input,
- const base::StringPiece16& remove_chars,
+ const StringPiece16& remove_chars,
string16* output) {
return ReplaceChars(input, remove_chars.as_string(), string16(), output);
}
bool RemoveChars(const std::string& input,
- const base::StringPiece& remove_chars,
+ const StringPiece& remove_chars,
std::string* output) {
return ReplaceChars(input, remove_chars.as_string(), std::string(), output);
}
@@ -231,13 +324,13 @@ TrimPositions TrimStringT(const Str& input,
}
bool TrimString(const string16& input,
- base::StringPiece16 trim_chars,
+ StringPiece16 trim_chars,
string16* output) {
return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
}
bool TrimString(const std::string& input,
- base::StringPiece trim_chars,
+ StringPiece trim_chars,
std::string* output) {
return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
}
@@ -254,13 +347,13 @@ BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
}
StringPiece16 TrimString(StringPiece16 input,
- const base::StringPiece16& trim_chars,
+ const StringPiece16& trim_chars,
TrimPositions positions) {
return TrimStringPieceT(input, trim_chars, positions);
}
StringPiece TrimString(StringPiece input,
- const base::StringPiece& trim_chars,
+ const StringPiece& trim_chars,
TrimPositions positions) {
return TrimStringPieceT(input, trim_chars, positions);
}
@@ -307,8 +400,8 @@ TrimPositions TrimWhitespace(const string16& input,
return TrimStringT(input, StringPiece16(kWhitespaceUTF16), positions, output);
}
-StringPiece16 TrimWhitespaceASCII(StringPiece16 input,
- TrimPositions positions) {
+StringPiece16 TrimWhitespace(StringPiece16 input,
+ TrimPositions positions) {
return TrimStringPieceT(input, StringPiece16(kWhitespaceUTF16), positions);
}
@@ -454,66 +547,45 @@ bool IsStringUTF8(const StringPiece& str) {
return true;
}
-template<typename Iter>
-static inline bool DoLowerCaseEqualsASCII(Iter a_begin,
- Iter a_end,
- const char* b) {
- for (Iter it = a_begin; it != a_end; ++it, ++b) {
- if (!*b || ToLowerASCII(*it) != *b)
+// Implementation note: Normally this function will be called with a hardcoded
+// constant for the lowercase_ascii parameter. Constructing a StringPiece from
+// a C constant requires running strlen, so the result will be two passes
+// through the buffers, one to file the length of lowercase_ascii, and one to
+// compare each letter.
+//
+// This function could have taken a const char* to avoid this and only do one
+// pass through the string. But the strlen is faster than the case-insensitive
+// compares and lets us early-exit in the case that the strings are different
+// lengths (will often be the case for non-matches). So whether one approach or
+// the other will be faster depends on the case.
+//
+// The hardcoded strings are typically very short so it doesn't matter, and the
+// string piece gives additional flexibility for the caller (doesn't have to be
+// null terminated) so we choose the StringPiece route.
+template<typename Str>
+static inline bool DoLowerCaseEqualsASCII(BasicStringPiece<Str> str,
+ StringPiece lowercase_ascii) {
+ if (str.size() != lowercase_ascii.size())
+ return false;
+ for (size_t i = 0; i < str.size(); i++) {
+ if (ToLowerASCII(str[i]) != lowercase_ascii[i])
return false;
}
- return *b == 0;
-}
-
-// Front-ends for LowerCaseEqualsASCII.
-bool LowerCaseEqualsASCII(const std::string& a, const char* b) {
- return DoLowerCaseEqualsASCII(a.begin(), a.end(), b);
-}
-
-bool LowerCaseEqualsASCII(const string16& a, const char* b) {
- return DoLowerCaseEqualsASCII(a.begin(), a.end(), b);
-}
-
-bool LowerCaseEqualsASCII(std::string::const_iterator a_begin,
- std::string::const_iterator a_end,
- const char* b) {
- return DoLowerCaseEqualsASCII(a_begin, a_end, b);
-}
-
-bool LowerCaseEqualsASCII(string16::const_iterator a_begin,
- string16::const_iterator a_end,
- const char* b) {
- return DoLowerCaseEqualsASCII(a_begin, a_end, b);
-}
-
-bool LowerCaseEqualsASCII(const char* a_begin,
- const char* a_end,
- const char* b) {
- return DoLowerCaseEqualsASCII(a_begin, a_end, b);
+ return true;
}
-bool LowerCaseEqualsASCII(const char* a_begin,
- const char* a_end,
- const char* b_begin,
- const char* b_end) {
- while (a_begin != a_end && b_begin != b_end &&
- ToLowerASCII(*a_begin) == *b_begin) {
- a_begin++;
- b_begin++;
- }
- return a_begin == a_end && b_begin == b_end;
+bool LowerCaseEqualsASCII(StringPiece str, StringPiece lowercase_ascii) {
+ return DoLowerCaseEqualsASCII<std::string>(str, lowercase_ascii);
}
-bool LowerCaseEqualsASCII(const char16* a_begin,
- const char16* a_end,
- const char* b) {
- return DoLowerCaseEqualsASCII(a_begin, a_end, b);
+bool LowerCaseEqualsASCII(StringPiece16 str, StringPiece lowercase_ascii) {
+ return DoLowerCaseEqualsASCII<string16>(str, lowercase_ascii);
}
-bool EqualsASCII(const string16& a, const StringPiece& b) {
- if (a.length() != b.length())
+bool EqualsASCII(StringPiece16 str, StringPiece ascii) {
+ if (str.length() != ascii.length())
return false;
- return std::equal(b.begin(), b.end(), a.begin());
+ return std::equal(ascii.begin(), ascii.end(), str.begin());
}
template<typename Str>
@@ -533,7 +605,7 @@ bool StartsWithT(BasicStringPiece<Str> str,
return std::equal(
search_for.begin(), search_for.end(),
source.begin(),
- base::CaseInsensitiveCompareASCII<typename Str::value_type>());
+ CaseInsensitiveCompareASCII<typename Str::value_type>());
default:
NOTREACHED();
@@ -553,23 +625,6 @@ bool StartsWith(StringPiece16 str,
return StartsWithT<string16>(str, search_for, case_sensitivity);
}
-bool StartsWith(const string16& str,
- const string16& search,
- bool case_sensitive) {
- if (!case_sensitive) {
- // This function was originally written using the current locale functions
- // for case-insensitive comparisons. Emulate this behavior until callers
- // can be converted either to use the case-insensitive ASCII one (most
- // callers) or ICU functions in base_i18n.
- if (search.size() > str.size())
- return false;
- return std::equal(search.begin(), search.end(), str.begin(),
- CaseInsensitiveCompare<char16>());
- }
- return StartsWith(StringPiece16(str), StringPiece16(search),
- CompareCase::SENSITIVE);
-}
-
template <typename Str>
bool EndsWithT(BasicStringPiece<Str> str,
BasicStringPiece<Str> search_for,
@@ -588,7 +643,7 @@ bool EndsWithT(BasicStringPiece<Str> str,
return std::equal(
source.begin(), source.end(),
search_for.begin(),
- base::CaseInsensitiveCompareASCII<typename Str::value_type>());
+ CaseInsensitiveCompareASCII<typename Str::value_type>());
default:
NOTREACHED();
@@ -604,28 +659,10 @@ bool EndsWith(StringPiece str,
bool EndsWith(StringPiece16 str,
StringPiece16 search_for,
- CompareCase case_sensitivity) {
+ CompareCase case_sensitivity) {
return EndsWithT<string16>(str, search_for, case_sensitivity);
}
-bool EndsWith(const string16& str,
- const string16& search,
- bool case_sensitive) {
- if (!case_sensitive) {
- // This function was originally written using the current locale functions
- // for case-insensitive comparisons. Emulate this behavior until callers
- // can be converted either to use the case-insensitive ASCII one (most
- // callers) or ICU functions in base_i18n.
- if (search.size() > str.size())
- return false;
- return std::equal(search.begin(), search.end(),
- str.begin() + (str.size() - search.size()),
- CaseInsensitiveCompare<char16>());
- }
- return EndsWith(StringPiece16(str), StringPiece16(search),
- CompareCase::SENSITIVE);
-}
-
char HexDigitToInt(wchar_t c) {
DCHECK(IsHexDigit(c));
if (c >= '0' && c <= '9')
@@ -821,64 +858,54 @@ char* WriteInto(std::string* str, size_t length_with_null) {
return WriteIntoT(str, length_with_null);
}
-char16* WriteInto(base::string16* str, size_t length_with_null) {
+char16* WriteInto(string16* str, size_t length_with_null) {
return WriteIntoT(str, length_with_null);
}
-} // namespace base
-
template<typename STR>
-static STR JoinStringT(const std::vector<STR>& parts, const STR& sep) {
+static STR JoinStringT(const std::vector<STR>& parts,
+ BasicStringPiece<STR> sep) {
if (parts.empty())
return STR();
STR result(parts[0]);
- typename std::vector<STR>::const_iterator iter = parts.begin();
+ auto iter = parts.begin();
++iter;
for (; iter != parts.end(); ++iter) {
- result += sep;
+ sep.AppendToString(&result);
result += *iter;
}
return result;
}
-std::string JoinString(const std::vector<std::string>& parts, char sep) {
- return JoinStringT(parts, std::string(1, sep));
-}
-
-string16 JoinString(const std::vector<string16>& parts, char16 sep) {
- return JoinStringT(parts, string16(1, sep));
-}
-
std::string JoinString(const std::vector<std::string>& parts,
- const std::string& separator) {
+ StringPiece separator) {
return JoinStringT(parts, separator);
}
string16 JoinString(const std::vector<string16>& parts,
- const string16& separator) {
+ StringPiece16 separator) {
return JoinStringT(parts, separator);
}
template<class FormatStringType, class OutStringType>
-OutStringType DoReplaceStringPlaceholders(const FormatStringType& format_string,
- const std::vector<OutStringType>& subst, std::vector<size_t>* offsets) {
+OutStringType DoReplaceStringPlaceholders(
+ const FormatStringType& format_string,
+ const std::vector<OutStringType>& subst,
+ std::vector<size_t>* offsets) {
size_t substitutions = subst.size();
size_t sub_length = 0;
- for (typename std::vector<OutStringType>::const_iterator iter = subst.begin();
- iter != subst.end(); ++iter) {
- sub_length += iter->length();
- }
+ for (const auto& cur : subst)
+ sub_length += cur.length();
OutStringType formatted;
formatted.reserve(format_string.length() + sub_length);
std::vector<ReplacementOffset> r_offsets;
- for (typename FormatStringType::const_iterator i = format_string.begin();
- i != format_string.end(); ++i) {
+ for (auto i = format_string.begin(); i != format_string.end(); ++i) {
if ('$' == *i) {
if (i + 1 != format_string.end()) {
++i;
@@ -916,10 +943,8 @@ OutStringType DoReplaceStringPlaceholders(const FormatStringType& format_string,
}
}
if (offsets) {
- for (std::vector<ReplacementOffset>::const_iterator i = r_offsets.begin();
- i != r_offsets.end(); ++i) {
- offsets->push_back(i->offset);
- }
+ for (const auto& cur : r_offsets)
+ offsets->push_back(cur.offset);
}
return formatted;
}
@@ -930,7 +955,7 @@ string16 ReplaceStringPlaceholders(const string16& format_string,
return DoReplaceStringPlaceholders(format_string, subst, offsets);
}
-std::string ReplaceStringPlaceholders(const base::StringPiece& format_string,
+std::string ReplaceStringPlaceholders(const StringPiece& format_string,
const std::vector<std::string>& subst,
std::vector<size_t>* offsets) {
return DoReplaceStringPlaceholders(format_string, subst, offsets);
@@ -974,9 +999,11 @@ size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
} // namespace
-size_t base::strlcpy(char* dst, const char* src, size_t dst_size) {
+size_t strlcpy(char* dst, const char* src, size_t dst_size) {
return lcpyT<char>(dst, src, dst_size);
}
-size_t base::wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
+size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
return lcpyT<wchar_t>(dst, src, dst_size);
}
+
+} // namespace base
diff --git a/chromium/base/strings/string_util.h b/chromium/base/strings/string_util.h
index 62b3605a893..169726bca5a 100644
--- a/chromium/base/strings/string_util.h
+++ b/chromium/base/strings/string_util.h
@@ -21,20 +21,10 @@
namespace base {
-// C standard-library functions like "strncasecmp" and "snprintf" that aren't
-// cross-platform are provided as "base::strncasecmp", and their prototypes
-// are listed below. These functions are then implemented as inline calls
-// to the platform-specific equivalents in the platform-specific headers.
-
-// Compares the two strings s1 and s2 without regard to case using
-// the current locale; returns 0 if they are equal, 1 if s1 > s2, and -1 if
-// s2 > s1 according to a lexicographic comparison.
-int strcasecmp(const char* s1, const char* s2);
-
-// Compares up to count characters of s1 and s2 without regard to case using
-// the current locale; returns 0 if they are equal, 1 if s1 > s2, and -1 if
-// s2 > s1 according to a lexicographic comparison.
-int strncasecmp(const char* s1, const char* s2, size_t count);
+// C standard-library functions that aren't cross-platform are provided as
+// "base::...", and their prototypes are listed below. These functions are
+// then implemented as inline calls to the platform-specific equivalents in the
+// platform-specific headers.
// Wrapper for vsnprintf that always null-terminates and always returns the
// number of characters that would be in an untruncated formatted
@@ -90,27 +80,38 @@ BASE_EXPORT bool IsWprintfFormatPortable(const wchar_t* format);
// ASCII-specific tolower. The standard library's tolower is locale sensitive,
// so we don't want to use it here.
-template <class Char> inline Char ToLowerASCII(Char c) {
+inline char ToLowerASCII(char c) {
+ return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
+}
+inline char16 ToLowerASCII(char16 c) {
return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
}
// ASCII-specific toupper. The standard library's toupper is locale sensitive,
// so we don't want to use it here.
-template <class Char> inline Char ToUpperASCII(Char c) {
+inline char ToUpperASCII(char c) {
+ return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
+}
+inline char16 ToUpperASCII(char16 c) {
return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
}
-// Function objects to aid in comparing/searching strings.
+// Converts the given string to it's ASCII-lowercase equivalent.
+BASE_EXPORT std::string ToLowerASCII(StringPiece str);
+BASE_EXPORT string16 ToLowerASCII(StringPiece16 str);
-template<typename Char> struct CaseInsensitiveCompare {
- public:
- bool operator()(Char x, Char y) const {
- // TODO(darin): Do we really want to do locale sensitive comparisons here?
- // See http://crbug.com/24917
- return tolower(x) == tolower(y);
- }
-};
+// Converts the given string to it's ASCII-uppercase equivalent.
+BASE_EXPORT std::string ToUpperASCII(StringPiece str);
+BASE_EXPORT string16 ToUpperASCII(StringPiece16 str);
+// Functor for case-insensitive ASCII comparisons for STL algorithms like
+// std::search.
+//
+// Note that a full Unicode version of this functor is not possible to write
+// because case mappings might change the number of characters, depend on
+// context (combining accents), and require handling UTF-16. If you need
+// proper Unicode support, use base::i18n::ToLower/FoldCase and then just
+// use a normal operator== on the result.
template<typename Char> struct CaseInsensitiveCompareASCII {
public:
bool operator()(Char x, Char y) const {
@@ -118,6 +119,22 @@ template<typename Char> struct CaseInsensitiveCompareASCII {
}
};
+// Like strcasecmp for case-insensitive ASCII characters only. Returns:
+// -1 (a < b)
+// 0 (a == b)
+// 1 (a > b)
+// (unlike strcasecmp which can return values greater or less than 1/-1). For
+// full Unicode support, use base::i18n::ToLower or base::i18h::FoldCase
+// and then just call the normal string operators on the result.
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
+// Equality for ASCII case-insensitive comparisons. For full Unicode support,
+// use base::i18n::ToLower or base::i18h::FoldCase and then compare with either
+// == or !=.
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b);
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b);
+
// These threadsafe functions return references to globally unique empty
// strings.
//
@@ -149,10 +166,10 @@ BASE_EXPORT extern const char kUtf8ByteOrderMark[];
// if any characters were removed. |remove_chars| must be null-terminated.
// NOTE: Safe to use the same variable for both |input| and |output|.
BASE_EXPORT bool RemoveChars(const string16& input,
- const base::StringPiece16& remove_chars,
+ const StringPiece16& remove_chars,
string16* output);
BASE_EXPORT bool RemoveChars(const std::string& input,
- const base::StringPiece& remove_chars,
+ const StringPiece& remove_chars,
std::string* output);
// Replaces characters in |replace_chars| from anywhere in |input| with
@@ -161,11 +178,11 @@ BASE_EXPORT bool RemoveChars(const std::string& input,
// |replace_chars| must be null-terminated.
// NOTE: Safe to use the same variable for both |input| and |output|.
BASE_EXPORT bool ReplaceChars(const string16& input,
- const base::StringPiece16& replace_chars,
+ const StringPiece16& replace_chars,
const string16& replace_with,
string16* output);
BASE_EXPORT bool ReplaceChars(const std::string& input,
- const base::StringPiece& replace_chars,
+ const StringPiece& replace_chars,
const std::string& replace_with,
std::string* output);
@@ -182,19 +199,19 @@ enum TrimPositions {
// It is safe to use the same variable for both |input| and |output| (this is
// the normal usage to trim in-place).
BASE_EXPORT bool TrimString(const string16& input,
- base::StringPiece16 trim_chars,
+ StringPiece16 trim_chars,
string16* output);
BASE_EXPORT bool TrimString(const std::string& input,
- base::StringPiece trim_chars,
+ StringPiece trim_chars,
std::string* output);
// StringPiece versions of the above. The returned pieces refer to the original
// buffer.
BASE_EXPORT StringPiece16 TrimString(StringPiece16 input,
- const base::StringPiece16& trim_chars,
+ const StringPiece16& trim_chars,
TrimPositions positions);
BASE_EXPORT StringPiece TrimString(StringPiece input,
- const base::StringPiece& trim_chars,
+ const StringPiece& trim_chars,
TrimPositions positions);
// Truncates a string to the nearest UTF-8 character that will leave
@@ -212,7 +229,7 @@ BASE_EXPORT void TruncateUTF8ToByteSize(const std::string& input,
// NOTE: Safe to use the same variable for both input and output.
BASE_EXPORT TrimPositions TrimWhitespace(const string16& input,
TrimPositions positions,
- base::string16* output);
+ string16* output);
BASE_EXPORT StringPiece16 TrimWhitespace(StringPiece16 input,
TrimPositions positions);
BASE_EXPORT TrimPositions TrimWhitespaceASCII(const std::string& input,
@@ -273,62 +290,17 @@ BASE_EXPORT bool IsStringASCII(const string16& str);
BASE_EXPORT bool IsStringASCII(const std::wstring& str);
#endif
-// Converts the elements of the given string. This version uses a pointer to
-// clearly differentiate it from the non-pointer variant.
-template <class str> inline void StringToLowerASCII(str* s) {
- for (typename str::iterator i = s->begin(); i != s->end(); ++i)
- *i = ToLowerASCII(*i);
-}
-
-template <class str> inline str StringToLowerASCII(const str& s) {
- // for std::string and std::wstring
- str output(s);
- StringToLowerASCII(&output);
- return output;
-}
+// Compare the lower-case form of the given string against the given
+// previously-lower-cased ASCII string (typically a constant).
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece str,
+ StringPiece lowecase_ascii);
+BASE_EXPORT bool LowerCaseEqualsASCII(StringPiece16 str,
+ StringPiece lowecase_ascii);
-// Converts the elements of the given string. This version uses a pointer to
-// clearly differentiate it from the non-pointer variant.
-template <class str> inline void StringToUpperASCII(str* s) {
- for (typename str::iterator i = s->begin(); i != s->end(); ++i)
- *i = ToUpperASCII(*i);
-}
-
-template <class str> inline str StringToUpperASCII(const str& s) {
- // for std::string and std::wstring
- str output(s);
- StringToUpperASCII(&output);
- return output;
-}
-//
-// Compare the lower-case form of the given string against the given ASCII
-// string. This is useful for doing checking if an input string matches some
-// token, and it is optimized to avoid intermediate string copies. This API is
-// borrowed from the equivalent APIs in Mozilla.
-BASE_EXPORT bool LowerCaseEqualsASCII(const std::string& a, const char* b);
-BASE_EXPORT bool LowerCaseEqualsASCII(const string16& a, const char* b);
-
-// Same thing, but with string iterators instead.
-BASE_EXPORT bool LowerCaseEqualsASCII(std::string::const_iterator a_begin,
- std::string::const_iterator a_end,
- const char* b);
-BASE_EXPORT bool LowerCaseEqualsASCII(string16::const_iterator a_begin,
- string16::const_iterator a_end,
- const char* b);
-BASE_EXPORT bool LowerCaseEqualsASCII(const char* a_begin,
- const char* a_end,
- const char* b);
-BASE_EXPORT bool LowerCaseEqualsASCII(const char* a_begin,
- const char* a_end,
- const char* b_begin,
- const char* b_end);
-BASE_EXPORT bool LowerCaseEqualsASCII(const char16* a_begin,
- const char16* a_end,
- const char* b);
-
-// Performs a case-sensitive string compare. The behavior is undefined if both
-// strings are not ASCII.
-BASE_EXPORT bool EqualsASCII(const string16& a, const StringPiece& b);
+// Performs a case-sensitive string compare of the given 16-bit string against
+// the given 8-bit ASCII string (typically a constant). The behavior is
+// undefined if the |ascii| string is not ASCII.
+BASE_EXPORT bool EqualsASCII(StringPiece16 str, StringPiece ascii);
// Indicates case sensitivity of comparisons. Only ASCII case insensitivity
// is supported. Full Unicode case-insensitive conversions would need to go in
@@ -356,29 +328,6 @@ BASE_EXPORT bool EndsWith(StringPiece16 str,
StringPiece16 search_for,
CompareCase case_sensitivity);
-// DEPRECATED. Returns true if str starts/ends with search, or false otherwise.
-// TODO(brettw) remove in favor of the "enum" versions above.
-inline bool StartsWithASCII(const std::string& str,
- const std::string& search,
- bool case_sensitive) {
- return StartsWith(StringPiece(str), StringPiece(search),
- case_sensitive ? CompareCase::SENSITIVE
- : CompareCase::INSENSITIVE_ASCII);
-}
-BASE_EXPORT bool StartsWith(const string16& str,
- const string16& search,
- bool case_sensitive);
-inline bool EndsWith(const std::string& str,
- const std::string& search,
- bool case_sensitive) {
- return EndsWith(StringPiece(str), StringPiece(search),
- case_sensitive ? CompareCase::SENSITIVE
- : CompareCase::INSENSITIVE_ASCII);
-}
-BASE_EXPORT bool EndsWith(const string16& str,
- const string16& search,
- bool case_sensitive);
-
// Determines the type of ASCII character, independent of locale (the C
// library versions will change based on locale).
template <typename Char>
@@ -439,7 +388,7 @@ BASE_EXPORT void ReplaceFirstSubstringAfterOffset(
// characters, for example:
// std::replace(str.begin(), str.end(), 'a', 'b');
BASE_EXPORT void ReplaceSubstringsAfterOffset(
- base::string16* str,
+ string16* str,
size_t start_offset,
StringPiece16 find_this,
StringPiece16 replace_with);
@@ -470,55 +419,44 @@ BASE_EXPORT void ReplaceSubstringsAfterOffset(
// than str.c_str() will get back a string of whatever size |str| had on entry
// to this function (probably 0).
BASE_EXPORT char* WriteInto(std::string* str, size_t length_with_null);
-BASE_EXPORT char16* WriteInto(base::string16* str, size_t length_with_null);
+BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
#ifndef OS_WIN
BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
#endif
-} // namespace base
-
-#if defined(OS_WIN)
-#include "base/strings/string_util_win.h"
-#elif defined(OS_POSIX)
-#include "base/strings/string_util_posix.h"
-#else
-#error Define string operations appropriately for your platform
-#endif
-
-//-----------------------------------------------------------------------------
-
// Does the opposite of SplitString().
-BASE_EXPORT base::string16 JoinString(const std::vector<base::string16>& parts,
- base::char16 s);
-BASE_EXPORT std::string JoinString(
- const std::vector<std::string>& parts, char s);
-
-// Join |parts| using |separator|.
-BASE_EXPORT std::string JoinString(
- const std::vector<std::string>& parts,
- const std::string& separator);
-BASE_EXPORT base::string16 JoinString(
- const std::vector<base::string16>& parts,
- const base::string16& separator);
+BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
+ StringPiece separator);
+BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
+ StringPiece16 separator);
// Replace $1-$2-$3..$9 in the format string with |a|-|b|-|c|..|i| respectively.
// Additionally, any number of consecutive '$' characters is replaced by that
// number less one. Eg $$->$, $$$->$$, etc. The offsets parameter here can be
// NULL. This only allows you to use up to nine replacements.
-BASE_EXPORT base::string16 ReplaceStringPlaceholders(
- const base::string16& format_string,
- const std::vector<base::string16>& subst,
+BASE_EXPORT string16 ReplaceStringPlaceholders(
+ const string16& format_string,
+ const std::vector<string16>& subst,
std::vector<size_t>* offsets);
BASE_EXPORT std::string ReplaceStringPlaceholders(
- const base::StringPiece& format_string,
+ const StringPiece& format_string,
const std::vector<std::string>& subst,
std::vector<size_t>* offsets);
// Single-string shortcut for ReplaceStringPlaceholders. |offset| may be NULL.
-BASE_EXPORT base::string16 ReplaceStringPlaceholders(
- const base::string16& format_string,
- const base::string16& a,
- size_t* offset);
+BASE_EXPORT string16 ReplaceStringPlaceholders(const string16& format_string,
+ const string16& a,
+ size_t* offset);
+
+} // namespace base
+
+#if defined(OS_WIN)
+#include "base/strings/string_util_win.h"
+#elif defined(OS_POSIX)
+#include "base/strings/string_util_posix.h"
+#else
+#error Define string operations appropriately for your platform
+#endif
#endif // BASE_STRINGS_STRING_UTIL_H_
diff --git a/chromium/base/strings/string_util_posix.h b/chromium/base/strings/string_util_posix.h
index d31a9fcbefd..9e96697ff56 100644
--- a/chromium/base/strings/string_util_posix.h
+++ b/chromium/base/strings/string_util_posix.h
@@ -20,14 +20,6 @@ inline char* strdup(const char* str) {
return ::strdup(str);
}
-inline int strcasecmp(const char* string1, const char* string2) {
- return ::strcasecmp(string1, string2);
-}
-
-inline int strncasecmp(const char* string1, const char* string2, size_t count) {
- return ::strncasecmp(string1, string2, count);
-}
-
inline int vsnprintf(char* buffer, size_t size,
const char* format, va_list arguments) {
return ::vsnprintf(buffer, size, format, arguments);
diff --git a/chromium/base/strings/string_util_unittest.cc b/chromium/base/strings/string_util_unittest.cc
index 02b184cc620..187e49e6698 100644
--- a/chromium/base/strings/string_util_unittest.cc
+++ b/chromium/base/strings/string_util_unittest.cc
@@ -503,30 +503,30 @@ TEST(StringUtilTest, ConvertASCII) {
EXPECT_EQ(0, string_with_nul.compare(narrow_with_nul));
}
+TEST(StringUtilTest, ToLowerASCII) {
+ EXPECT_EQ('c', ToLowerASCII('C'));
+ EXPECT_EQ('c', ToLowerASCII('c'));
+ EXPECT_EQ('2', ToLowerASCII('2'));
+
+ EXPECT_EQ(static_cast<char16>('c'), ToLowerASCII(static_cast<char16>('C')));
+ EXPECT_EQ(static_cast<char16>('c'), ToLowerASCII(static_cast<char16>('c')));
+ EXPECT_EQ(static_cast<char16>('2'), ToLowerASCII(static_cast<char16>('2')));
+
+ EXPECT_EQ("cc2", ToLowerASCII("Cc2"));
+ EXPECT_EQ(ASCIIToUTF16("cc2"), ToLowerASCII(ASCIIToUTF16("Cc2")));
+}
+
TEST(StringUtilTest, ToUpperASCII) {
EXPECT_EQ('C', ToUpperASCII('C'));
EXPECT_EQ('C', ToUpperASCII('c'));
EXPECT_EQ('2', ToUpperASCII('2'));
- EXPECT_EQ(L'C', ToUpperASCII(L'C'));
- EXPECT_EQ(L'C', ToUpperASCII(L'c'));
- EXPECT_EQ(L'2', ToUpperASCII(L'2'));
-
- std::string in_place_a("Cc2");
- StringToUpperASCII(&in_place_a);
- EXPECT_EQ("CC2", in_place_a);
+ EXPECT_EQ(static_cast<char16>('C'), ToUpperASCII(static_cast<char16>('C')));
+ EXPECT_EQ(static_cast<char16>('C'), ToUpperASCII(static_cast<char16>('c')));
+ EXPECT_EQ(static_cast<char16>('2'), ToUpperASCII(static_cast<char16>('2')));
- std::wstring in_place_w(L"Cc2");
- StringToUpperASCII(&in_place_w);
- EXPECT_EQ(L"CC2", in_place_w);
-
- std::string original_a("Cc2");
- std::string upper_a = StringToUpperASCII(original_a);
- EXPECT_EQ("CC2", upper_a);
-
- std::wstring original_w(L"Cc2");
- std::wstring upper_w = StringToUpperASCII(original_w);
- EXPECT_EQ(L"CC2", upper_w);
+ EXPECT_EQ("CC2", ToUpperASCII("Cc2"));
+ EXPECT_EQ(ASCIIToUTF16("CC2"), ToUpperASCII(ASCIIToUTF16("Cc2")));
}
TEST(StringUtilTest, LowerCaseEqualsASCII) {
@@ -669,26 +669,7 @@ TEST(StringUtilTest, HexDigitToInt) {
EXPECT_EQ(15, HexDigitToInt('f'));
}
-// Test for JoinString
TEST(StringUtilTest, JoinString) {
- std::vector<std::string> in;
- EXPECT_EQ("", JoinString(in, ','));
-
- in.push_back("a");
- EXPECT_EQ("a", JoinString(in, ','));
-
- in.push_back("b");
- in.push_back("c");
- EXPECT_EQ("a,b,c", JoinString(in, ','));
-
- in.push_back(std::string());
- EXPECT_EQ("a,b,c,", JoinString(in, ','));
- in.push_back(" ");
- EXPECT_EQ("a|b|c|| ", JoinString(in, '|'));
-}
-
-// Test for JoinString overloaded with std::string separator
-TEST(StringUtilTest, JoinStringWithString) {
std::string separator(", ");
std::vector<std::string> parts;
EXPECT_EQ(std::string(), JoinString(parts, separator));
@@ -706,8 +687,7 @@ TEST(StringUtilTest, JoinStringWithString) {
EXPECT_EQ("a|b|c|| ", JoinString(parts, "|"));
}
-// Test for JoinString overloaded with string16 separator
-TEST(StringUtilTest, JoinStringWithString16) {
+TEST(StringUtilTest, JoinString16) {
string16 separator = ASCIIToUTF16(", ");
std::vector<string16> parts;
EXPECT_EQ(string16(), JoinString(parts, separator));
@@ -726,59 +706,83 @@ TEST(StringUtilTest, JoinStringWithString16) {
}
TEST(StringUtilTest, StartsWith) {
- EXPECT_TRUE(StartsWithASCII("javascript:url", "javascript", true));
- EXPECT_FALSE(StartsWithASCII("JavaScript:url", "javascript", true));
- EXPECT_TRUE(StartsWithASCII("javascript:url", "javascript", false));
- EXPECT_TRUE(StartsWithASCII("JavaScript:url", "javascript", false));
- EXPECT_FALSE(StartsWithASCII("java", "javascript", true));
- EXPECT_FALSE(StartsWithASCII("java", "javascript", false));
- EXPECT_FALSE(StartsWithASCII(std::string(), "javascript", false));
- EXPECT_FALSE(StartsWithASCII(std::string(), "javascript", true));
- EXPECT_TRUE(StartsWithASCII("java", std::string(), false));
- EXPECT_TRUE(StartsWithASCII("java", std::string(), true));
+ EXPECT_TRUE(StartsWith("javascript:url", "javascript",
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith("JavaScript:url", "javascript",
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith("javascript:url", "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith("JavaScript:url", "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith("java", "javascript", base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith("java", "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(std::string(), "javascript",
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(std::string(), "javascript",
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith("java", std::string(),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith("java", std::string(), base::CompareCase::SENSITIVE));
EXPECT_TRUE(StartsWith(ASCIIToUTF16("javascript:url"),
- ASCIIToUTF16("javascript"), true));
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
EXPECT_FALSE(StartsWith(ASCIIToUTF16("JavaScript:url"),
- ASCIIToUTF16("javascript"), true));
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
EXPECT_TRUE(StartsWith(ASCIIToUTF16("javascript:url"),
- ASCIIToUTF16("javascript"), false));
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
EXPECT_TRUE(StartsWith(ASCIIToUTF16("JavaScript:url"),
- ASCIIToUTF16("javascript"), false));
- EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"),
- ASCIIToUTF16("javascript"), true));
- EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"),
- ASCIIToUTF16("javascript"), false));
- EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"), false));
- EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"), true));
- EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(), false));
- EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(), true));
+ ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"), ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(StartsWith(ASCIIToUTF16("java"), ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(StartsWith(string16(), ASCIIToUTF16("javascript"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(StartsWith(ASCIIToUTF16("java"), string16(),
+ base::CompareCase::SENSITIVE));
}
TEST(StringUtilTest, EndsWith) {
- EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"),
- ASCIIToUTF16(".plugin"), true));
- EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.Plugin"),
- ASCIIToUTF16(".plugin"), true));
- EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"),
- ASCIIToUTF16(".plugin"), false));
- EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.Plugin"),
- ASCIIToUTF16(".plugin"), false));
- EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"), true));
- EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"), false));
- EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"),
- ASCIIToUTF16(".plugin"), true));
- EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"),
- ASCIIToUTF16(".plugin"), false));
- EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"), false));
- EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"), true));
- EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(), false));
- EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(), true));
- EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"),
- ASCIIToUTF16(".plugin"), false));
- EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"), true));
- EXPECT_TRUE(EndsWith(string16(), string16(), false));
- EXPECT_TRUE(EndsWith(string16(), string16(), true));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.Plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.Plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16(".plug"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_FALSE(EndsWith(ASCIIToUTF16("Foo.plugin Bar"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_FALSE(EndsWith(string16(), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16("Foo.plugin"), string16(),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(ASCIIToUTF16(".plugin"), ASCIIToUTF16(".plugin"),
+ base::CompareCase::SENSITIVE));
+ EXPECT_TRUE(
+ EndsWith(string16(), string16(), base::CompareCase::INSENSITIVE_ASCII));
+ EXPECT_TRUE(EndsWith(string16(), string16(), base::CompareCase::SENSITIVE));
}
TEST(StringUtilTest, GetStringFWithOffsets) {
@@ -1056,6 +1060,26 @@ TEST(StringUtilTest, ContainsOnlyChars) {
kWhitespaceUTF16));
}
+TEST(StringUtilTest, CompareCaseInsensitiveASCII) {
+ EXPECT_EQ(0, CompareCaseInsensitiveASCII("", ""));
+ EXPECT_EQ(0, CompareCaseInsensitiveASCII("Asdf", "aSDf"));
+
+ // Differing lengths.
+ EXPECT_EQ(-1, CompareCaseInsensitiveASCII("Asdf", "aSDfA"));
+ EXPECT_EQ(1, CompareCaseInsensitiveASCII("AsdfA", "aSDf"));
+
+ // Differing values.
+ EXPECT_EQ(-1, CompareCaseInsensitiveASCII("AsdfA", "aSDfb"));
+ EXPECT_EQ(1, CompareCaseInsensitiveASCII("Asdfb", "aSDfA"));
+}
+
+TEST(StringUtilTest, EqualsCaseInsensitiveASCII) {
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("", ""));
+ EXPECT_TRUE(EqualsCaseInsensitiveASCII("Asdf", "aSDF"));
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("bsdf", "aSDF"));
+ EXPECT_FALSE(EqualsCaseInsensitiveASCII("Asdf", "aSDFz"));
+}
+
class WriteIntoTest : public testing::Test {
protected:
static void WritesCorrectly(size_t num_chars) {
diff --git a/chromium/base/strings/string_util_win.h b/chromium/base/strings/string_util_win.h
index 7c1e34cf8fc..839a799a157 100644
--- a/chromium/base/strings/string_util_win.h
+++ b/chromium/base/strings/string_util_win.h
@@ -20,14 +20,6 @@ inline char* strdup(const char* str) {
return _strdup(str);
}
-inline int strcasecmp(const char* s1, const char* s2) {
- return _stricmp(s1, s2);
-}
-
-inline int strncasecmp(const char* s1, const char* s2, size_t count) {
- return _strnicmp(s1, s2, count);
-}
-
inline int vsnprintf(char* buffer, size_t size,
const char* format, va_list arguments) {
int length = vsnprintf_s(buffer, size, size - 1, format, arguments);
diff --git a/chromium/base/strings/sys_string_conversions_unittest.cc b/chromium/base/strings/sys_string_conversions_unittest.cc
index 0cdd4281ecf..90c4767e25d 100644
--- a/chromium/base/strings/sys_string_conversions_unittest.cc
+++ b/chromium/base/strings/sys_string_conversions_unittest.cc
@@ -76,7 +76,7 @@ TEST(SysStrings, SysUTF8ToWide) {
TEST(SysStrings, SysWideToNativeMB) {
#if !defined(SYSTEM_NATIVE_UTF8)
- ScopedLocale locale("en_US.utf-8");
+ ScopedLocale locale("en_US.UTF-8");
#endif
EXPECT_EQ("Hello, world", SysWideToNativeMB(L"Hello, world"));
EXPECT_EQ("\xe4\xbd\xa0\xe5\xa5\xbd", SysWideToNativeMB(L"\x4f60\x597d"));
@@ -108,7 +108,7 @@ TEST(SysStrings, SysWideToNativeMB) {
// We assume the test is running in a UTF8 locale.
TEST(SysStrings, SysNativeMBToWide) {
#if !defined(SYSTEM_NATIVE_UTF8)
- ScopedLocale locale("en_US.utf-8");
+ ScopedLocale locale("en_US.UTF-8");
#endif
EXPECT_EQ(L"Hello, world", SysNativeMBToWide("Hello, world"));
EXPECT_EQ(L"\x4f60\x597d", SysNativeMBToWide("\xe4\xbd\xa0\xe5\xa5\xbd"));
@@ -164,7 +164,7 @@ static const wchar_t* const kConvertRoundtripCases[] = {
TEST(SysStrings, SysNativeMBAndWide) {
#if !defined(SYSTEM_NATIVE_UTF8)
- ScopedLocale locale("en_US.utf-8");
+ ScopedLocale locale("en_US.UTF-8");
#endif
for (size_t i = 0; i < arraysize(kConvertRoundtripCases); ++i) {
std::wstring wide = kConvertRoundtripCases[i];
diff --git a/chromium/base/strings/utf_string_conversion_utils.h b/chromium/base/strings/utf_string_conversion_utils.h
index 22abbbc9e79..a1b2e64a182 100644
--- a/chromium/base/strings/utf_string_conversion_utils.h
+++ b/chromium/base/strings/utf_string_conversion_utils.h
@@ -60,7 +60,6 @@ BASE_EXPORT bool ReadUnicodeCharacter(const wchar_t* src,
// Appends a UTF-8 character to the given 8-bit string. Returns the number of
// bytes written.
-// TODO(brettw) Bug 79631: This function should not be exposed.
BASE_EXPORT size_t WriteUnicodeCharacter(uint32 code_point,
std::string* output);
diff --git a/chromium/base/strings/utf_string_conversions.cc b/chromium/base/strings/utf_string_conversions.cc
index 1480d48086f..b6cf6ff9b10 100644
--- a/chromium/base/strings/utf_string_conversions.cc
+++ b/chromium/base/strings/utf_string_conversions.cc
@@ -73,7 +73,7 @@ bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
}
}
-std::wstring UTF8ToWide(const StringPiece& utf8) {
+std::wstring UTF8ToWide(StringPiece utf8) {
if (IsStringASCII(utf8)) {
return std::wstring(utf8.begin(), utf8.end());
}
@@ -153,7 +153,7 @@ bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
}
}
-string16 UTF8ToUTF16(const StringPiece& utf8) {
+string16 UTF8ToUTF16(StringPiece utf8) {
if (IsStringASCII(utf8)) {
return string16(utf8.begin(), utf8.end());
}
@@ -176,7 +176,7 @@ bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
}
}
-std::string UTF16ToUTF8(const string16& utf16) {
+std::string UTF16ToUTF8(StringPiece16 utf16) {
if (IsStringASCII(utf16)) {
return std::string(utf16.begin(), utf16.end());
}
@@ -195,7 +195,7 @@ bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
return UTF8ToWide(src, src_len, output);
}
-string16 UTF8ToUTF16(const StringPiece& utf8) {
+string16 UTF8ToUTF16(StringPiece utf8) {
return UTF8ToWide(utf8);
}
@@ -203,18 +203,24 @@ bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
return WideToUTF8(src, src_len, output);
}
-std::string UTF16ToUTF8(const string16& utf16) {
- return WideToUTF8(utf16);
+std::string UTF16ToUTF8(StringPiece16 utf16) {
+ if (IsStringASCII(utf16))
+ return std::string(utf16.data(), utf16.data() + utf16.length());
+
+ std::string ret;
+ PrepareForUTF8Output(utf16.data(), utf16.length(), &ret);
+ ConvertUnicode(utf16.data(), utf16.length(), &ret);
+ return ret;
}
#endif
-string16 ASCIIToUTF16(const StringPiece& ascii) {
+string16 ASCIIToUTF16(StringPiece ascii) {
DCHECK(IsStringASCII(ascii)) << ascii;
return string16(ascii.begin(), ascii.end());
}
-std::string UTF16ToASCII(const string16& utf16) {
+std::string UTF16ToASCII(StringPiece16 utf16) {
DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);
return std::string(utf16.begin(), utf16.end());
}
diff --git a/chromium/base/strings/utf_string_conversions.h b/chromium/base/strings/utf_string_conversions.h
index 06a3bc64767..9b15730df38 100644
--- a/chromium/base/strings/utf_string_conversions.h
+++ b/chromium/base/strings/utf_string_conversions.h
@@ -24,7 +24,7 @@ BASE_EXPORT bool WideToUTF8(const wchar_t* src, size_t src_len,
BASE_EXPORT std::string WideToUTF8(const std::wstring& wide);
BASE_EXPORT bool UTF8ToWide(const char* src, size_t src_len,
std::wstring* output);
-BASE_EXPORT std::wstring UTF8ToWide(const StringPiece& utf8);
+BASE_EXPORT std::wstring UTF8ToWide(StringPiece utf8);
BASE_EXPORT bool WideToUTF16(const wchar_t* src, size_t src_len,
string16* output);
@@ -34,18 +34,18 @@ BASE_EXPORT bool UTF16ToWide(const char16* src, size_t src_len,
BASE_EXPORT std::wstring UTF16ToWide(const string16& utf16);
BASE_EXPORT bool UTF8ToUTF16(const char* src, size_t src_len, string16* output);
-BASE_EXPORT string16 UTF8ToUTF16(const StringPiece& utf8);
+BASE_EXPORT string16 UTF8ToUTF16(StringPiece utf8);
BASE_EXPORT bool UTF16ToUTF8(const char16* src, size_t src_len,
std::string* output);
-BASE_EXPORT std::string UTF16ToUTF8(const string16& utf16);
+BASE_EXPORT std::string UTF16ToUTF8(StringPiece16 utf16);
// This converts an ASCII string, typically a hardcoded constant, to a UTF16
// string.
-BASE_EXPORT string16 ASCIIToUTF16(const StringPiece& ascii);
+BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii);
// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
// beforehand.
-BASE_EXPORT std::string UTF16ToASCII(const string16& utf16);
+BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16);
} // namespace base
diff --git a/chromium/base/sync_socket.h b/chromium/base/sync_socket.h
index 36d6bc1f59b..201fb1c1553 100644
--- a/chromium/base/sync_socket.h
+++ b/chromium/base/sync_socket.h
@@ -83,10 +83,8 @@ class BASE_EXPORT SyncSocket {
TimeDelta timeout);
// Returns the number of bytes available. If non-zero, Receive() will not
- // not block when called. NOTE: Some implementations cannot reliably
- // determine the number of bytes available so avoid using the returned
- // size as a promise and simply test against zero.
- size_t Peek();
+  // block when called.
+ virtual size_t Peek();
// Extracts the contained handle. Used for transferring between
// processes.
diff --git a/chromium/base/sync_socket_unittest.cc b/chromium/base/sync_socket_unittest.cc
index 7c8c97cbc19..ff9b8bc8c2f 100644
--- a/chromium/base/sync_socket_unittest.cc
+++ b/chromium/base/sync_socket_unittest.cc
@@ -3,6 +3,10 @@
// found in the LICENSE file.
#include "base/basictypes.h"
+// TODO(ellyjones): Remove once http://crbug.com/523296 is fixed.
+#if defined(OS_IOS) && !TARGET_IPHONE_SIMULATOR
+#include "base/ios/ios_util.h"
+#endif
#include "base/sync_socket.h"
#include "base/threading/simple_thread.h"
#include "base/time/time.h"
@@ -114,6 +118,11 @@ TEST(CancelableSyncSocket, ClonedSendReceivePeek) {
}
TEST(CancelableSyncSocket, CancelReceiveShutdown) {
+// TODO(ellyjones): This test fails on iOS 7 devices. http://crbug.com/523296
+#if defined(OS_IOS) && !TARGET_IPHONE_SIMULATOR
+ if (!base::ios::IsRunningOnIOS8OrLater())
+ return;
+#endif
base::CancelableSyncSocket socket_a, socket_b;
ASSERT_TRUE(base::CancelableSyncSocket::CreatePair(&socket_a, &socket_b));
diff --git a/chromium/base/sync_socket_win.cc b/chromium/base/sync_socket_win.cc
index e5088162433..e16b925bbdf 100644
--- a/chromium/base/sync_socket_win.cc
+++ b/chromium/base/sync_socket_win.cc
@@ -5,6 +5,7 @@
#include "base/sync_socket.h"
#include "base/logging.h"
+#include "base/rand_util.h"
#include "base/threading/thread_restrictions.h"
#include "base/win/scoped_handle.h"
@@ -39,9 +40,8 @@ bool CreatePairImpl(HANDLE* socket_a, HANDLE* socket_b, bool overlapped) {
flags |= FILE_FLAG_OVERLAPPED;
do {
- unsigned int rnd_name;
- if (rand_s(&rnd_name) != 0)
- return false;
+ unsigned long rnd_name;
+ RandBytes(&rnd_name, sizeof(rnd_name));
swprintf(name, kPipePathMax,
kPipeNameFormat,
diff --git a/chromium/base/synchronization/condition_variable.h b/chromium/base/synchronization/condition_variable.h
index 5d8507d4373..91e4d1350d6 100644
--- a/chromium/base/synchronization/condition_variable.h
+++ b/chromium/base/synchronization/condition_variable.h
@@ -73,6 +73,7 @@
#include "base/base_export.h"
#include "base/basictypes.h"
+#include "base/logging.h"
#include "base/synchronization/lock.h"
namespace base {
@@ -104,7 +105,7 @@ class BASE_EXPORT ConditionVariable {
#elif defined(OS_POSIX)
pthread_cond_t condition_;
pthread_mutex_t* user_mutex_;
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
base::Lock* user_lock_; // Needed to adjust shadow lock state on wait.
#endif
diff --git a/chromium/base/synchronization/condition_variable_posix.cc b/chromium/base/synchronization/condition_variable_posix.cc
index 013284c888f..0e4668feb2a 100644
--- a/chromium/base/synchronization/condition_variable_posix.cc
+++ b/chromium/base/synchronization/condition_variable_posix.cc
@@ -7,7 +7,6 @@
#include <errno.h>
#include <sys/time.h>
-#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
@@ -16,7 +15,7 @@ namespace base {
ConditionVariable::ConditionVariable(Lock* user_lock)
: user_mutex_(user_lock->lock_.native_handle())
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
, user_lock_(user_lock)
#endif
{
@@ -42,18 +41,32 @@ ConditionVariable::ConditionVariable(Lock* user_lock)
}
ConditionVariable::~ConditionVariable() {
+#if defined(OS_MACOSX)
+ // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+ // Darwin kernel. http://crbug.com/517681.
+ {
+ base::Lock lock;
+ base::AutoLock l(lock);
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ pthread_cond_timedwait_relative_np(&condition_, lock.lock_.native_handle(),
+ &ts);
+ }
+#endif
+
int rv = pthread_cond_destroy(&condition_);
DCHECK_EQ(0, rv);
}
void ConditionVariable::Wait() {
base::ThreadRestrictions::AssertWaitAllowed();
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
int rv = pthread_cond_wait(&condition_, user_mutex_);
DCHECK_EQ(0, rv);
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
@@ -66,7 +79,7 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
relative_time.tv_nsec =
(usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
@@ -104,7 +117,7 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
#endif // OS_MACOSX
DCHECK(rv == 0 || rv == ETIMEDOUT);
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
diff --git a/chromium/base/synchronization/condition_variable_win.cc b/chromium/base/synchronization/condition_variable_win.cc
index 470e564c5b0..4256ac8224d 100644
--- a/chromium/base/synchronization/condition_variable_win.cc
+++ b/chromium/base/synchronization/condition_variable_win.cc
@@ -8,7 +8,6 @@
#include <stack>
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
@@ -99,7 +98,7 @@ void WinVistaCondVar::TimedWait(const TimeDelta& max_time) {
DWORD timeout = static_cast<DWORD>(max_time.InMilliseconds());
CRITICAL_SECTION* cs = user_lock_.lock_.native_handle();
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
user_lock_.CheckHeldAndUnmark();
#endif
diff --git a/chromium/base/synchronization/lock.cc b/chromium/base/synchronization/lock.cc
index b1576c50c71..03297ada52f 100644
--- a/chromium/base/synchronization/lock.cc
+++ b/chromium/base/synchronization/lock.cc
@@ -6,10 +6,9 @@
// is functionally a wrapper around the LockImpl class, so the only
// real intelligence in the class is in the debugging logic.
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-
#include "base/synchronization/lock.h"
-#include "base/logging.h"
+
+#if DCHECK_IS_ON()
namespace base {
@@ -36,4 +35,4 @@ void Lock::CheckUnheldAndMark() {
} // namespace base
-#endif // !NDEBUG || DCHECK_ALWAYS_ON
+#endif // DCHECK_IS_ON()
diff --git a/chromium/base/synchronization/lock.h b/chromium/base/synchronization/lock.h
index f384e414728..81e274809f8 100644
--- a/chromium/base/synchronization/lock.h
+++ b/chromium/base/synchronization/lock.h
@@ -6,6 +6,7 @@
#define BASE_SYNCHRONIZATION_LOCK_H_
#include "base/base_export.h"
+#include "base/logging.h"
#include "base/synchronization/lock_impl.h"
#include "base/threading/platform_thread.h"
@@ -16,7 +17,7 @@ namespace base {
// AssertAcquired() method.
class BASE_EXPORT Lock {
public:
-#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#if !DCHECK_IS_ON()
// Optimized wrapper implementation
Lock() : lock_() {}
~Lock() {}
@@ -56,7 +57,7 @@ class BASE_EXPORT Lock {
}
void AssertAcquired() const;
-#endif // NDEBUG && !DCHECK_ALWAYS_ON
+#endif // !DCHECK_IS_ON()
#if defined(OS_POSIX)
// The posix implementation of ConditionVariable needs to be able
@@ -70,7 +71,7 @@ class BASE_EXPORT Lock {
#endif
private:
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+#if DCHECK_IS_ON()
// Members and routines taking care of locks assertions.
// Note that this checks for recursive locks and allows them
// if the variable is set. This is allowed by the underlying implementation
@@ -82,7 +83,7 @@ class BASE_EXPORT Lock {
// All private data is implicitly protected by lock_.
// Be VERY careful to only access members under that lock.
base::PlatformThreadRef owning_thread_ref_;
-#endif // !NDEBUG || DCHECK_ALWAYS_ON
+#endif // DCHECK_IS_ON()
// Platform specific underlying lock implementation.
internal::LockImpl lock_;
diff --git a/chromium/base/synchronization/waitable_event_watcher_win.cc b/chromium/base/synchronization/waitable_event_watcher_win.cc
index 46d47ac581a..6533539d14d 100644
--- a/chromium/base/synchronization/waitable_event_watcher_win.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_win.cc
@@ -22,7 +22,7 @@ bool WaitableEventWatcher::StartWatching(
const EventCallback& callback) {
callback_ = callback;
event_ = event;
- return watcher_.StartWatching(event->handle(), this);
+ return watcher_.StartWatchingOnce(event->handle(), this);
}
void WaitableEventWatcher::StopWatching() {
diff --git a/chromium/base/synchronization/waitable_event_win.cc b/chromium/base/synchronization/waitable_event_win.cc
index 4db56277b7c..2d6d7348755 100644
--- a/chromium/base/synchronization/waitable_event_win.cc
+++ b/chromium/base/synchronization/waitable_event_win.cc
@@ -73,7 +73,7 @@ bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
size_t WaitableEvent::WaitMany(WaitableEvent** events, size_t count) {
base::ThreadRestrictions::AssertWaitAllowed();
HANDLE handles[MAXIMUM_WAIT_OBJECTS];
- CHECK_LE(count, MAXIMUM_WAIT_OBJECTS)
+ CHECK_LE(count, static_cast<size_t>(MAXIMUM_WAIT_OBJECTS))
<< "Can only wait on " << MAXIMUM_WAIT_OBJECTS << " with WaitMany";
for (size_t i = 0; i < count; ++i)
diff --git a/chromium/base/sys_info.cc b/chromium/base/sys_info.cc
index 6e283be1f95..f24ebd3547d 100644
--- a/chromium/base/sys_info.cc
+++ b/chromium/base/sys_info.cc
@@ -48,7 +48,7 @@ bool SysInfo::IsLowEndDevice() {
}
#endif
-#if !defined(OS_MACOSX) || defined(OS_IOS)
+#if (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_ANDROID)
std::string SysInfo::HardwareModelName() {
return std::string();
}
diff --git a/chromium/base/sys_info.h b/chromium/base/sys_info.h
index 654d6945a34..5a81dc1583f 100644
--- a/chromium/base/sys_info.h
+++ b/chromium/base/sys_info.h
@@ -52,9 +52,9 @@ class BASE_EXPORT SysInfo {
static int64 Uptime();
// Returns a descriptive string for the current machine model or an empty
- // string if machime model is unknown or an error occured.
- // e.g. MacPro1,1 on Mac.
- // Only implemented on OS X, will return an empty string on other platforms.
+ // string if the machine model is unknown or an error occurred.
+ // e.g. "MacPro1,1" on Mac, or "Nexus 5" on Android. Only implemented on OS X,
+ // Android, and Chrome OS. This returns an empty string on other platforms.
static std::string HardwareModelName();
// Returns the name of the host operating system.
@@ -129,9 +129,6 @@ class BASE_EXPORT SysInfo {
// Returns the Android build ID.
static std::string GetAndroidBuildID();
- // Returns the device's name.
- static std::string GetDeviceName();
-
static int DalvikHeapSizeMB();
static int DalvikHeapGrowthLimitMB();
#endif // defined(OS_ANDROID)
diff --git a/chromium/base/sys_info_android.cc b/chromium/base/sys_info_android.cc
index 245097ffdc0..c288ae2fe87 100644
--- a/chromium/base/sys_info_android.cc
+++ b/chromium/base/sys_info_android.cc
@@ -155,28 +155,16 @@ int GetDalvikHeapGrowthLimitMB() {
namespace base {
-std::string SysInfo::OperatingSystemName() {
- return "Android";
-}
-
-std::string SysInfo::GetAndroidBuildCodename() {
- char os_version_codename_str[PROP_VALUE_MAX];
- __system_property_get("ro.build.version.codename", os_version_codename_str);
- return std::string(os_version_codename_str);
-}
-
-std::string SysInfo::GetAndroidBuildID() {
- char os_build_id_str[PROP_VALUE_MAX];
- __system_property_get("ro.build.id", os_build_id_str);
- return std::string(os_build_id_str);
-}
-
-std::string SysInfo::GetDeviceName() {
+std::string SysInfo::HardwareModelName() {
char device_model_str[PROP_VALUE_MAX];
__system_property_get("ro.product.model", device_model_str);
return std::string(device_model_str);
}
+std::string SysInfo::OperatingSystemName() {
+ return "Android";
+}
+
std::string SysInfo::OperatingSystemVersion() {
int32 major, minor, bugfix;
OperatingSystemVersionNumbers(&major, &minor, &bugfix);
@@ -195,6 +183,18 @@ void SysInfo::OperatingSystemVersionNumbers(int32* major_version,
bugfix_version);
}
+std::string SysInfo::GetAndroidBuildCodename() {
+ char os_version_codename_str[PROP_VALUE_MAX];
+ __system_property_get("ro.build.version.codename", os_version_codename_str);
+ return std::string(os_version_codename_str);
+}
+
+std::string SysInfo::GetAndroidBuildID() {
+ char os_build_id_str[PROP_VALUE_MAX];
+ __system_property_get("ro.build.id", os_build_id_str);
+ return std::string(os_build_id_str);
+}
+
int SysInfo::DalvikHeapSizeMB() {
static int heap_size = GetDalvikHeapSizeMB();
return heap_size;
diff --git a/chromium/base/third_party/dynamic_annotations/BUILD.gn b/chromium/base/third_party/dynamic_annotations/BUILD.gn
index bc324ae4a78..86f6558401a 100644
--- a/chromium/base/third_party/dynamic_annotations/BUILD.gn
+++ b/chromium/base/third_party/dynamic_annotations/BUILD.gn
@@ -19,7 +19,7 @@ if (is_nacl) {
"dynamic_annotations.h",
]
if (is_android && !is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
}
diff --git a/chromium/base/third_party/superfasthash/OWNERS b/chromium/base/third_party/superfasthash/OWNERS
index f34cfb1c929..633cc350a88 100644
--- a/chromium/base/third_party/superfasthash/OWNERS
+++ b/chromium/base/third_party/superfasthash/OWNERS
@@ -1,2 +1 @@
mgiuca@chromium.org
-rvargas@chromium.org
diff --git a/chromium/base/thread_task_runner_handle.cc b/chromium/base/thread_task_runner_handle.cc
index 860a0ec31df..ee337b382e7 100644
--- a/chromium/base/thread_task_runner_handle.cc
+++ b/chromium/base/thread_task_runner_handle.cc
@@ -12,7 +12,7 @@ namespace base {
namespace {
-base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle> >
+base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle> >::Leaky
lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
} // namespace
diff --git a/chromium/base/threading/platform_thread.h b/chromium/base/threading/platform_thread.h
index 3468f45f91e..6b52cc4eb63 100644
--- a/chromium/base/threading/platform_thread.h
+++ b/chromium/base/threading/platform_thread.h
@@ -73,25 +73,9 @@ class PlatformThreadHandle {
typedef pthread_t Handle;
#endif
- PlatformThreadHandle()
- : handle_(0),
- id_(0) {
- }
-
- explicit PlatformThreadHandle(Handle handle)
- : handle_(handle),
- id_(0) {
- }
+ PlatformThreadHandle() : handle_(0) {}
- PlatformThreadHandle(Handle handle,
- PlatformThreadId id)
- : handle_(handle),
- id_(id) {
- }
-
- PlatformThreadId id() const {
- return id_;
- }
+ explicit PlatformThreadHandle(Handle handle) : handle_(handle) {}
bool is_equal(const PlatformThreadHandle& other) const {
return handle_ == other.handle_;
@@ -107,13 +91,12 @@ class PlatformThreadHandle {
private:
Handle handle_;
- PlatformThreadId id_;
};
const PlatformThreadId kInvalidThreadId(0);
-// Valid values for SetThreadPriority(), listed in increasing order of
-// importance.
+// Valid values for priority of Thread::Options and SimpleThread::Options, and
+// SetCurrentThreadPriority(), listed in increasing order of importance.
enum class ThreadPriority {
// Suitable for threads that shouldn't disrupt high priority work.
BACKGROUND,
@@ -172,12 +155,15 @@ class BASE_EXPORT PlatformThread {
// NOTE: When you are done with the thread handle, you must call Join to
// release system resources associated with the thread. You must ensure that
// the Delegate object outlives the thread.
- static bool Create(size_t stack_size, Delegate* delegate,
- PlatformThreadHandle* thread_handle);
+ static bool Create(size_t stack_size,
+ Delegate* delegate,
+ PlatformThreadHandle* thread_handle) {
+ return CreateWithPriority(stack_size, delegate, thread_handle,
+ ThreadPriority::NORMAL);
+ }
// CreateWithPriority() does the same thing as Create() except the priority of
- // the thread is set based on |priority|. Can be used in place of Create()
- // followed by SetThreadPriority().
+ // the thread is set based on |priority|.
static bool CreateWithPriority(size_t stack_size, Delegate* delegate,
PlatformThreadHandle* thread_handle,
ThreadPriority priority);
@@ -192,15 +178,15 @@ class BASE_EXPORT PlatformThread {
// |thread_handle|.
static void Join(PlatformThreadHandle thread_handle);
- // Toggles the target thread's priority at runtime. Prefer
- // CreateWithPriority() to set the thread's initial priority.
- // NOTE: The call may fail if the caller thread is not the same as the
- // target thread on POSIX. For example, seccomp-bpf blocks it by default
- // in the sandbox.
- static void SetThreadPriority(PlatformThreadHandle handle,
- ThreadPriority priority);
+ // Toggles the current thread's priority at runtime. A thread may not be able
+ // to raise its priority back up after lowering it if the process does not
+ // have a proper permission, e.g. CAP_SYS_NICE on Linux.
+ // Since changing other threads' priority is not permitted in favor of
+ // security, this interface is restricted to change only the current thread
+ // priority (https://crbug.com/399473).
+ static void SetCurrentThreadPriority(ThreadPriority priority);
- static ThreadPriority GetThreadPriority(PlatformThreadHandle handle);
+ static ThreadPriority GetCurrentThreadPriority();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
diff --git a/chromium/base/threading/platform_thread_android.cc b/chromium/base/threading/platform_thread_android.cc
index e661af5eab0..b6bea49b36c 100644
--- a/chromium/base/threading/platform_thread_android.cc
+++ b/chromium/base/threading/platform_thread_android.cc
@@ -37,8 +37,7 @@ const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
{ThreadPriority::REALTIME_AUDIO, -16},
};
-bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority priority) {
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
// On Android, we set the Audio priority through JNI as Audio priority
// will also allow the process to run while it is backgrounded.
if (priority == ThreadPriority::REALTIME_AUDIO) {
@@ -49,9 +48,15 @@ bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
return false;
}
-bool GetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority* priority) {
- NOTIMPLEMENTED();
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
+ DCHECK(priority);
+ *priority = ThreadPriority::NORMAL;
+ JNIEnv* env = base::android::AttachCurrentThread();
+ if (Java_ThreadUtils_isThreadPriorityAudio(
+ env, PlatformThread::CurrentId())) {
+ *priority = ThreadPriority::REALTIME_AUDIO;
+ return true;
+ }
return false;
}
@@ -81,8 +86,7 @@ void InitThreading() {
void InitOnThread() {
// Threads on linux/android may inherit their priority from the thread
// where they were created. This sets all new threads to the default.
- PlatformThread::SetThreadPriority(PlatformThread::CurrentHandle(),
- ThreadPriority::NORMAL);
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::NORMAL);
}
void TerminateOnThread() {
diff --git a/chromium/base/threading/platform_thread_freebsd.cc b/chromium/base/threading/platform_thread_freebsd.cc
index f4fded0ebb2..e29e865e63b 100644
--- a/chromium/base/threading/platform_thread_freebsd.cc
+++ b/chromium/base/threading/platform_thread_freebsd.cc
@@ -36,11 +36,8 @@ const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
{ThreadPriority::REALTIME_AUDIO, -10},
}
-bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority priority) {
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
#if !defined(OS_NACL)
- // TODO(gab): Assess the correctness of using |pthread_self()| below instead
- // of |handle|. http://crbug.com/468793.
return priority == ThreadPriority::REALTIME_AUDIO &&
pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
#else
@@ -48,11 +45,8 @@ bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
#endif
}
-bool GetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority* priority) {
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
#if !defined(OS_NACL)
- // TODO(gab): Assess the correctness of using |pthread_self()| below instead
- // of |handle|. http://crbug.com/468793.
int maybe_sched_rr = 0;
struct sched_param maybe_realtime_prio = {0};
if (pthread_getschedparam(pthread_self(), &maybe_sched_rr,
diff --git a/chromium/base/threading/platform_thread_internal_posix.h b/chromium/base/threading/platform_thread_internal_posix.h
index 62006ce13c9..05a8d1e26e7 100644
--- a/chromium/base/threading/platform_thread_internal_posix.h
+++ b/chromium/base/threading/platform_thread_internal_posix.h
@@ -26,17 +26,15 @@ int ThreadPriorityToNiceValue(ThreadPriority priority);
ThreadPriority NiceValueToThreadPriority(int nice_value);
// Allows platform specific tweaks to the generic POSIX solution for
-// SetThreadPriority. Returns true if the platform-specific implementation
-// handled this |priority| change, false if the generic implementation should
-// instead proceed.
-bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority priority);
-
-// Returns true if there is a platform-specific ThreadPriority set on |handle|
-// (and returns the actual ThreadPriority via |priority|). Returns false
-// otherwise, leaving |priority| untouched.
-bool GetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority* priority);
+// SetCurrentThreadPriority. Returns true if the platform-specific
+// implementation handled this |priority| change, false if the generic
+// implementation should instead proceed.
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority);
+
+// Returns true if there is a platform-specific ThreadPriority set on the
+// current thread (and returns the actual ThreadPriority via |priority|).
+// Returns false otherwise, leaving |priority| untouched.
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority);
} // namespace internal
diff --git a/chromium/base/threading/platform_thread_linux.cc b/chromium/base/threading/platform_thread_linux.cc
index e0f620e198a..48cf7443f7d 100644
--- a/chromium/base/threading/platform_thread_linux.cc
+++ b/chromium/base/threading/platform_thread_linux.cc
@@ -38,12 +38,11 @@ const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
{ThreadPriority::REALTIME_AUDIO, -10},
};
-bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority priority) {
+bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
#if !defined(OS_NACL)
ThreadPriority current_priority;
if (priority != ThreadPriority::REALTIME_AUDIO &&
- GetThreadPriorityForPlatform(handle, &current_priority) &&
+ GetCurrentThreadPriorityForPlatform(&current_priority) &&
current_priority == ThreadPriority::REALTIME_AUDIO) {
// If the pthread's round-robin scheduler is already enabled, and the new
// priority will use setpriority() instead, the pthread scheduler should be
@@ -51,8 +50,6 @@ bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
pthread_setschedparam(pthread_self(), SCHED_OTHER, &kResetPrio);
return false;
}
- // TODO(gab): Assess the correctness of using |pthread_self()| below instead
- // of |handle|. http://crbug.com/468793.
return priority == ThreadPriority::REALTIME_AUDIO &&
pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
#else
@@ -60,13 +57,10 @@ bool SetThreadPriorityForPlatform(PlatformThreadHandle handle,
#endif
}
-bool GetThreadPriorityForPlatform(PlatformThreadHandle handle,
- ThreadPriority* priority) {
+bool GetCurrentThreadPriorityForPlatform(ThreadPriority* priority) {
#if !defined(OS_NACL)
int maybe_sched_rr = 0;
struct sched_param maybe_realtime_prio = {0};
- // TODO(gab): Assess the correctness of using |pthread_self()| below instead
- // of |handle|. http://crbug.com/468793.
if (pthread_getschedparam(pthread_self(), &maybe_sched_rr,
&maybe_realtime_prio) == 0 &&
maybe_sched_rr == SCHED_RR &&
diff --git a/chromium/base/threading/platform_thread_mac.mm b/chromium/base/threading/platform_thread_mac.mm
index 813cae26d2b..1ecbcd6ad91 100644
--- a/chromium/base/threading/platform_thread_mac.mm
+++ b/chromium/base/threading/platform_thread_mac.mm
@@ -155,10 +155,10 @@ void SetPriorityRealtimeAudio(mach_port_t mach_thread_id) {
} // anonymous namespace
// static
-void PlatformThread::SetThreadPriority(PlatformThreadHandle handle,
- ThreadPriority priority) {
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
// Convert from pthread_t to mach thread identifier.
- mach_port_t mach_thread_id = pthread_mach_thread_np(handle.platform_handle());
+ mach_port_t mach_thread_id =
+ pthread_mach_thread_np(PlatformThread::CurrentHandle().platform_handle());
switch (priority) {
case ThreadPriority::NORMAL:
@@ -174,7 +174,7 @@ void PlatformThread::SetThreadPriority(PlatformThreadHandle handle,
}
// static
-ThreadPriority PlatformThread::GetThreadPriority(PlatformThreadHandle handle) {
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
NOTIMPLEMENTED();
return ThreadPriority::NORMAL;
}
diff --git a/chromium/base/threading/platform_thread_posix.cc b/chromium/base/threading/platform_thread_posix.cc
index 0d821a9b7ad..0adb92dbce5 100644
--- a/chromium/base/threading/platform_thread_posix.cc
+++ b/chromium/base/threading/platform_thread_posix.cc
@@ -13,11 +13,9 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread_internal_posix.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_restrictions.h"
-#include "base/tracked_objects.h"
#if defined(OS_LINUX)
#include <sys/syscall.h>
@@ -36,38 +34,28 @@ namespace {
struct ThreadParams {
ThreadParams()
- : delegate(NULL),
- joinable(false),
- priority(ThreadPriority::NORMAL),
- handle(NULL),
- handle_set(false, false) {
- }
+ : delegate(NULL), joinable(false), priority(ThreadPriority::NORMAL) {}
PlatformThread::Delegate* delegate;
bool joinable;
ThreadPriority priority;
- PlatformThreadHandle* handle;
- WaitableEvent handle_set;
};
void* ThreadFunc(void* params) {
base::InitOnThread();
- ThreadParams* thread_params = static_cast<ThreadParams*>(params);
- PlatformThread::Delegate* delegate = thread_params->delegate;
- if (!thread_params->joinable)
- base::ThreadRestrictions::SetSingletonAllowed(false);
+ PlatformThread::Delegate* delegate = nullptr;
- if (thread_params->priority != ThreadPriority::NORMAL) {
- PlatformThread::SetThreadPriority(PlatformThread::CurrentHandle(),
- thread_params->priority);
- }
+ {
+ scoped_ptr<ThreadParams> thread_params(static_cast<ThreadParams*>(params));
- // Stash the id in the handle so the calling thread has a complete
- // handle, and unblock the parent thread.
- *(thread_params->handle) = PlatformThreadHandle(pthread_self(),
- PlatformThread::CurrentId());
- thread_params->handle_set.Signal();
+ delegate = thread_params->delegate;
+ if (!thread_params->joinable)
+ base::ThreadRestrictions::SetSingletonAllowed(false);
+
+ if (thread_params->priority != ThreadPriority::NORMAL)
+ PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+ }
ThreadIdNameManager::GetInstance()->RegisterThread(
PlatformThread::CurrentHandle().platform_handle(),
@@ -83,21 +71,21 @@ void* ThreadFunc(void* params) {
return NULL;
}
-bool CreateThread(size_t stack_size, bool joinable,
+bool CreateThread(size_t stack_size,
+ bool joinable,
PlatformThread::Delegate* delegate,
PlatformThreadHandle* thread_handle,
ThreadPriority priority) {
+ DCHECK(thread_handle);
base::InitThreading();
- bool success = false;
pthread_attr_t attributes;
pthread_attr_init(&attributes);
// Pthreads are joinable by default, so only specify the detached
// attribute if the thread should be non-joinable.
- if (!joinable) {
+ if (!joinable)
pthread_attr_setdetachstate(&attributes, PTHREAD_CREATE_DETACHED);
- }
// Get a better default if available.
if (stack_size == 0)
@@ -106,33 +94,27 @@ bool CreateThread(size_t stack_size, bool joinable,
if (stack_size > 0)
pthread_attr_setstacksize(&attributes, stack_size);
- ThreadParams params;
- params.delegate = delegate;
- params.joinable = joinable;
- params.priority = priority;
- params.handle = thread_handle;
+ scoped_ptr<ThreadParams> params(new ThreadParams);
+ params->delegate = delegate;
+ params->joinable = joinable;
+ params->priority = priority;
pthread_t handle;
- int err = pthread_create(&handle,
- &attributes,
- ThreadFunc,
- &params);
- success = !err;
- if (!success) {
+ int err = pthread_create(&handle, &attributes, ThreadFunc, params.get());
+ bool success = !err;
+ if (success) {
+ // ThreadParams should be deleted on the created thread after used.
+ ignore_result(params.release());
+ } else {
// Value of |handle| is undefined if pthread_create fails.
handle = 0;
errno = err;
PLOG(ERROR) << "pthread_create";
}
+ *thread_handle = PlatformThreadHandle(handle);
pthread_attr_destroy(&attributes);
- // Don't let this call complete until the thread id
- // is set in the handle.
- if (success)
- params.handle_set.Wait();
- CHECK_EQ(handle, thread_handle->platform_handle());
-
return success;
}
@@ -167,7 +149,7 @@ PlatformThreadRef PlatformThread::CurrentRef() {
// static
PlatformThreadHandle PlatformThread::CurrentHandle() {
- return PlatformThreadHandle(pthread_self(), CurrentId());
+ return PlatformThreadHandle(pthread_self());
}
// static
@@ -196,18 +178,9 @@ const char* PlatformThread::GetName() {
}
// static
-bool PlatformThread::Create(size_t stack_size, Delegate* delegate,
- PlatformThreadHandle* thread_handle) {
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
- return CreateThread(stack_size, true /* joinable thread */,
- delegate, thread_handle, ThreadPriority::NORMAL);
-}
-
-// static
bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
PlatformThreadHandle* thread_handle,
ThreadPriority priority) {
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
return CreateThread(stack_size, true, // joinable thread
delegate, thread_handle, priority);
}
@@ -216,7 +189,6 @@ bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
PlatformThreadHandle unused;
- base::ThreadRestrictions::ScopedAllowWait allow_wait;
bool result = CreateThread(stack_size, false /* non-joinable thread */,
delegate, &unused, ThreadPriority::NORMAL);
return result;
@@ -231,16 +203,15 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), NULL));
}
-// Mac has its own Set/GetThreadPriority() implementations.
+// Mac has its own Set/GetCurrentThreadPriority() implementations.
#if !defined(OS_MACOSX)
// static
-void PlatformThread::SetThreadPriority(PlatformThreadHandle handle,
- ThreadPriority priority) {
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
#if defined(OS_NACL)
NOTIMPLEMENTED();
#else
- if (internal::SetThreadPriorityForPlatform(handle, priority))
+ if (internal::SetCurrentThreadPriorityForPlatform(priority))
return;
// setpriority(2) should change the whole thread group's (i.e. process)
@@ -249,39 +220,34 @@ void PlatformThread::SetThreadPriority(PlatformThreadHandle handle,
// Linux/NPTL implementation of POSIX threads, the nice value is a per-thread
// attribute". Also, 0 is prefered to the current thread id since it is
// equivalent but makes sandboxing easier (https://crbug.com/399473).
- DCHECK_NE(handle.id(), kInvalidThreadId);
const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
- const PlatformThreadId current_id = PlatformThread::CurrentId();
- if (setpriority(PRIO_PROCESS, handle.id() == current_id ? 0 : handle.id(),
- nice_setting)) {
- DVPLOG(1) << "Failed to set nice value of thread (" << handle.id()
- << ") to " << nice_setting;
+ if (setpriority(PRIO_PROCESS, 0, nice_setting)) {
+ DVPLOG(1) << "Failed to set nice value of thread ("
+ << PlatformThread::CurrentId() << ") to " << nice_setting;
}
#endif // defined(OS_NACL)
}
// static
-ThreadPriority PlatformThread::GetThreadPriority(PlatformThreadHandle handle) {
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
#if defined(OS_NACL)
NOTIMPLEMENTED();
return ThreadPriority::NORMAL;
#else
- // Mirrors SetThreadPriority()'s implementation.
+ // Mirrors SetCurrentThreadPriority()'s implementation.
ThreadPriority platform_specific_priority;
- if (internal::GetThreadPriorityForPlatform(handle,
- &platform_specific_priority)) {
+ if (internal::GetCurrentThreadPriorityForPlatform(
+ &platform_specific_priority)) {
return platform_specific_priority;
}
- DCHECK_NE(handle.id(), kInvalidThreadId);
- const PlatformThreadId current_id = PlatformThread::CurrentId();
// Need to clear errno before calling getpriority():
// http://man7.org/linux/man-pages/man2/getpriority.2.html
errno = 0;
- int nice_value =
- getpriority(PRIO_PROCESS, handle.id() == current_id ? 0 : handle.id());
+ int nice_value = getpriority(PRIO_PROCESS, 0);
if (errno != 0) {
- DVPLOG(1) << "Failed to get nice value of thread (" << handle.id() << ")";
+ DVPLOG(1) << "Failed to get nice value of thread ("
+ << PlatformThread::CurrentId() << ")";
return ThreadPriority::NORMAL;
}
diff --git a/chromium/base/threading/platform_thread_unittest.cc b/chromium/base/threading/platform_thread_unittest.cc
index c4b3d5d7ecc..1ac08a77ae2 100644
--- a/chromium/base/threading/platform_thread_unittest.cc
+++ b/chromium/base/threading/platform_thread_unittest.cc
@@ -8,7 +8,10 @@
#include "base/threading/platform_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if defined(OS_WIN)
+#if defined(OS_POSIX)
+#include <sys/types.h>
+#include <unistd.h>
+#elif defined(OS_WIN)
#include <windows.h>
#endif
@@ -16,6 +19,8 @@ namespace base {
// Trivial tests that thread runs and doesn't crash on create and join ---------
+namespace {
+
class TrivialThread : public PlatformThread::Delegate {
public:
TrivialThread() : did_run_(false) {}
@@ -30,6 +35,8 @@ class TrivialThread : public PlatformThread::Delegate {
DISALLOW_COPY_AND_ASSIGN(TrivialThread);
};
+} // namespace
+
TEST(PlatformThreadTest, Trivial) {
TrivialThread thread;
PlatformThreadHandle handle;
@@ -56,11 +63,13 @@ TEST(PlatformThreadTest, TrivialTimesTen) {
// Tests of basic thread functions ---------------------------------------------
+namespace {
+
class FunctionTestThread : public PlatformThread::Delegate {
public:
FunctionTestThread()
: thread_id_(kInvalidThreadId),
- thread_started_(true, false),
+ termination_ready_(true, false),
terminate_thread_(true, false),
done_(false) {}
~FunctionTestThread() override {
@@ -70,8 +79,9 @@ class FunctionTestThread : public PlatformThread::Delegate {
<< "WaitableEvent blocking the underlying thread's main.";
}
- // Grabs |thread_id_|, signals |thread_started_|, and then waits for
- // |terminate_thread_| to be signaled before exiting.
+ // Grabs |thread_id_|, runs an optional test on that thread, signals
+ // |termination_ready_|, and then waits for |terminate_thread_| to be
+ // signaled before exiting.
void ThreadMain() override {
thread_id_ = PlatformThread::CurrentId();
EXPECT_NE(thread_id_, kInvalidThreadId);
@@ -79,39 +89,46 @@ class FunctionTestThread : public PlatformThread::Delegate {
// Make sure that the thread ID is the same across calls.
EXPECT_EQ(thread_id_, PlatformThread::CurrentId());
- thread_started_.Signal();
+ // Run extra tests.
+ RunTest();
+ termination_ready_.Signal();
terminate_thread_.Wait();
done_ = true;
}
PlatformThreadId thread_id() const {
- EXPECT_TRUE(thread_started_.IsSignaled()) << "Thread ID still unknown";
+ EXPECT_TRUE(termination_ready_.IsSignaled()) << "Thread ID still unknown";
return thread_id_;
}
bool IsRunning() const {
- return thread_started_.IsSignaled() && !done_;
+ return termination_ready_.IsSignaled() && !done_;
}
- // Blocks until this thread is started.
- void WaitForThreadStart() { thread_started_.Wait(); }
+ // Blocks until this thread is started and ready to be terminated.
+ void WaitForTerminationReady() { termination_ready_.Wait(); }
- // Mark this thread for termination (callers must then join this thread to be
+ // Marks this thread for termination (callers must then join this thread to be
// guaranteed of termination).
void MarkForTermination() { terminate_thread_.Signal(); }
private:
+ // Runs an optional test on the newly created thread.
+ virtual void RunTest() {}
+
PlatformThreadId thread_id_;
- mutable WaitableEvent thread_started_;
+ mutable WaitableEvent termination_ready_;
WaitableEvent terminate_thread_;
bool done_;
DISALLOW_COPY_AND_ASSIGN(FunctionTestThread);
};
+} // namespace
+
TEST(PlatformThreadTest, Function) {
PlatformThreadId main_thread_id = PlatformThread::CurrentId();
@@ -120,7 +137,7 @@ TEST(PlatformThreadTest, Function) {
ASSERT_FALSE(thread.IsRunning());
ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
- thread.WaitForThreadStart();
+ thread.WaitForTerminationReady();
ASSERT_TRUE(thread.IsRunning());
EXPECT_NE(thread.thread_id(), main_thread_id);
@@ -144,7 +161,7 @@ TEST(PlatformThreadTest, FunctionTimesTen) {
for (size_t n = 0; n < arraysize(thread); n++)
ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
for (size_t n = 0; n < arraysize(thread); n++)
- thread[n].WaitForThreadStart();
+ thread[n].WaitForTerminationReady();
for (size_t n = 0; n < arraysize(thread); n++) {
ASSERT_TRUE(thread[n].IsRunning());
@@ -170,106 +187,86 @@ TEST(PlatformThreadTest, FunctionTimesTen) {
namespace {
const ThreadPriority kThreadPriorityTestValues[] = {
-// Disable non-normal priority toggling on POSIX as it appears to be broken
-// (http://crbug.com/468793). This is prefered to disabling the tests altogether
-// on POSIX as it at least provides coverage for running this code under
-// "normal" priority.
-#if !defined(OS_POSIX)
- ThreadPriority::DISPLAY,
+// The order should be higher to lower to cover as many cases as possible on
+// Linux trybots running without CAP_SYS_NICE permission.
+#if !defined(OS_ANDROID)
+  // PlatformThread::GetCurrentThreadPriority() on Android does not support
+  // the REALTIME_AUDIO case. See http://crbug.com/505474.
ThreadPriority::REALTIME_AUDIO,
- // Keep BACKGROUND second to last to test backgrounding from other
- // priorities.
+#endif
+ ThreadPriority::DISPLAY,
+ // This redundant BACKGROUND priority is to test backgrounding from other
+ // priorities, and unbackgrounding.
ThreadPriority::BACKGROUND,
-#endif // !defined(OS_POSIX)
- // Keep NORMAL last to test unbackgrounding.
- ThreadPriority::NORMAL
-};
-
-} // namespace
-
-// Test changing another thread's priority.
-// NOTE: This test is partially disabled on POSIX, see note above and
-// http://crbug.com/468793.
-TEST(PlatformThreadTest, ThreadPriorityOtherThread) {
- PlatformThreadHandle current_handle(PlatformThread::CurrentHandle());
-
- // Confirm that the current thread's priority is as expected.
- EXPECT_EQ(ThreadPriority::NORMAL,
- PlatformThread::GetThreadPriority(current_handle));
-
- // Create a test thread.
- FunctionTestThread thread;
- PlatformThreadHandle handle;
- ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
- thread.WaitForThreadStart();
- EXPECT_NE(thread.thread_id(), kInvalidThreadId);
- EXPECT_NE(thread.thread_id(), PlatformThread::CurrentId());
-
- // New threads should get normal priority by default.
- EXPECT_EQ(ThreadPriority::NORMAL, PlatformThread::GetThreadPriority(handle));
-
- // Toggle each supported priority on the test thread and confirm it only
- // affects it (and not the current thread).
- for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
- SCOPED_TRACE(i);
+ ThreadPriority::NORMAL,
+ ThreadPriority::BACKGROUND};
+
+bool IsBumpingPriorityAllowed() {
+#if defined(OS_POSIX)
+ // Only root can raise thread priority on POSIX environment. On Linux, users
+ // who have CAP_SYS_NICE permission also can raise the thread priority, but
+ // libcap.so would be needed to check the capability.
+ return geteuid() == 0;
+#else
+ return true;
+#endif
+}
- // Alter and verify the test thread's priority.
- PlatformThread::SetThreadPriority(handle, kThreadPriorityTestValues[i]);
- EXPECT_EQ(kThreadPriorityTestValues[i],
- PlatformThread::GetThreadPriority(handle));
+class ThreadPriorityTestThread : public FunctionTestThread {
+ public:
+ ThreadPriorityTestThread() = default;
+ ~ThreadPriorityTestThread() override = default;
- // Make sure the current thread was otherwise unaffected.
+ private:
+ void RunTest() override {
+ // Confirm that the current thread's priority is as expected.
EXPECT_EQ(ThreadPriority::NORMAL,
- PlatformThread::GetThreadPriority(current_handle));
+ PlatformThread::GetCurrentThreadPriority());
+
+ // Toggle each supported priority on the current thread and confirm it
+ // affects it.
+ const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
+ for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
+ SCOPED_TRACE(i);
+ if (!bumping_priority_allowed &&
+ kThreadPriorityTestValues[i] >
+ PlatformThread::GetCurrentThreadPriority()) {
+ continue;
+ }
+
+ // Alter and verify the current thread's priority.
+ PlatformThread::SetCurrentThreadPriority(kThreadPriorityTestValues[i]);
+ EXPECT_EQ(kThreadPriorityTestValues[i],
+ PlatformThread::GetCurrentThreadPriority());
+ }
}
- thread.MarkForTermination();
- PlatformThread::Join(handle);
-}
+ DISALLOW_COPY_AND_ASSIGN(ThreadPriorityTestThread);
+};
-// Test changing the current thread's priority (which has different semantics on
-// some platforms).
-// NOTE: This test is partially disabled on POSIX, see note above and
-// http://crbug.com/468793.
-TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
- PlatformThreadHandle current_handle(PlatformThread::CurrentHandle());
+} // namespace
- // Confirm that the current thread's priority is as expected.
- EXPECT_EQ(ThreadPriority::NORMAL,
- PlatformThread::GetThreadPriority(current_handle));
+#if defined(OS_MACOSX)
+// PlatformThread::GetCurrentThreadPriority() is not implemented on OS X.
+#define MAYBE_ThreadPriorityCurrentThread DISABLED_ThreadPriorityCurrentThread
+#else
+#define MAYBE_ThreadPriorityCurrentThread ThreadPriorityCurrentThread
+#endif
- // Create a test thread for verification purposes only.
- FunctionTestThread thread;
+// Test changing a created thread's priority (which has different semantics on
+// some platforms).
+TEST(PlatformThreadTest, MAYBE_ThreadPriorityCurrentThread) {
+ ThreadPriorityTestThread thread;
PlatformThreadHandle handle;
- ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
- thread.WaitForThreadStart();
- EXPECT_NE(thread.thread_id(), kInvalidThreadId);
- EXPECT_NE(thread.thread_id(), PlatformThread::CurrentId());
-
- // Confirm that the new thread's priority is as expected.
- EXPECT_EQ(ThreadPriority::NORMAL, PlatformThread::GetThreadPriority(handle));
-
- // Toggle each supported priority on the current thread and confirm it only
- // affects it (and not the test thread).
- for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
- SCOPED_TRACE(i);
-
- // Alter and verify the current thread's priority.
- PlatformThread::SetThreadPriority(current_handle,
- kThreadPriorityTestValues[i]);
- EXPECT_EQ(kThreadPriorityTestValues[i],
- PlatformThread::GetThreadPriority(current_handle));
-
- // Make sure the test thread was otherwise unaffected.
- EXPECT_EQ(ThreadPriority::NORMAL,
- PlatformThread::GetThreadPriority(handle));
- }
- // Restore current thread priority for follow-up tests.
- PlatformThread::SetThreadPriority(current_handle, ThreadPriority::NORMAL);
+ ASSERT_FALSE(thread.IsRunning());
+ ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+ thread.WaitForTerminationReady();
+ ASSERT_TRUE(thread.IsRunning());
thread.MarkForTermination();
PlatformThread::Join(handle);
+ ASSERT_FALSE(thread.IsRunning());
}
} // namespace base
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index c2eab6ccfd3..25973bcada9 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -55,10 +55,8 @@ DWORD __stdcall ThreadFunc(void* params) {
if (!thread_params->joinable)
base::ThreadRestrictions::SetSingletonAllowed(false);
- if (thread_params->priority != ThreadPriority::NORMAL) {
- PlatformThread::SetThreadPriority(
- PlatformThread::CurrentHandle(), thread_params->priority);
- }
+ if (thread_params->priority != ThreadPriority::NORMAL)
+ PlatformThread::SetCurrentThreadPriority(thread_params->priority);
// Retrieve a copy of the thread handle to use as the key in the
// thread name mapping.
@@ -89,12 +87,12 @@ DWORD __stdcall ThreadFunc(void* params) {
PlatformThread::CurrentId());
}
- return NULL;
+ return 0;
}
// CreateThreadInternal() matches PlatformThread::CreateWithPriority(), except
-// that |out_thread_handle| may be NULL, in which case a non-joinable thread is
-// created.
+// that |out_thread_handle| may be nullptr, in which case a non-joinable thread
+// is created.
bool CreateThreadInternal(size_t stack_size,
PlatformThread::Delegate* delegate,
PlatformThreadHandle* out_thread_handle,
@@ -108,7 +106,7 @@ bool CreateThreadInternal(size_t stack_size,
ThreadParams* params = new ThreadParams;
params->delegate = delegate;
- params->joinable = out_thread_handle != NULL;
+ params->joinable = out_thread_handle != nullptr;
params->priority = priority;
// Using CreateThread here vs _beginthreadex makes thread creation a bit
@@ -116,16 +114,15 @@ bool CreateThreadInternal(size_t stack_size,
// have to work running on CreateThread() threads anyway, since we run code
// on the Windows thread pool, etc. For some background on the difference:
// http://www.microsoft.com/msj/1099/win32/win321099.aspx
- PlatformThreadId thread_id;
- void* thread_handle = CreateThread(
- NULL, stack_size, ThreadFunc, params, flags, &thread_id);
+ void* thread_handle =
+ ::CreateThread(nullptr, stack_size, ThreadFunc, params, flags, nullptr);
if (!thread_handle) {
delete params;
return false;
}
if (out_thread_handle)
- *out_thread_handle = PlatformThreadHandle(thread_handle, thread_id);
+ *out_thread_handle = PlatformThreadHandle(thread_handle);
else
CloseHandle(thread_handle);
return true;
@@ -191,13 +188,6 @@ const char* PlatformThread::GetName() {
}
// static
-bool PlatformThread::Create(size_t stack_size, Delegate* delegate,
- PlatformThreadHandle* thread_handle) {
- return CreateWithPriority(
- stack_size, delegate, thread_handle, ThreadPriority::NORMAL);
-}
-
-// static
bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
PlatformThreadHandle* thread_handle,
ThreadPriority priority) {
@@ -207,8 +197,8 @@ bool PlatformThread::CreateWithPriority(size_t stack_size, Delegate* delegate,
// static
bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
- return CreateThreadInternal(
- stack_size, delegate, NULL, ThreadPriority::NORMAL);
+ return CreateThreadInternal(stack_size, delegate, nullptr,
+ ThreadPriority::NORMAL);
}
// static
@@ -238,10 +228,7 @@ void PlatformThread::Join(PlatformThreadHandle thread_handle) {
}
// static
-void PlatformThread::SetThreadPriority(PlatformThreadHandle handle,
- ThreadPriority priority) {
- DCHECK(!handle.is_null());
-
+void PlatformThread::SetCurrentThreadPriority(ThreadPriority priority) {
int desired_priority = THREAD_PRIORITY_ERROR_RETURN;
switch (priority) {
case ThreadPriority::BACKGROUND:
@@ -265,16 +252,16 @@ void PlatformThread::SetThreadPriority(PlatformThreadHandle handle,
#ifndef NDEBUG
const BOOL success =
#endif
- ::SetThreadPriority(handle.platform_handle(), desired_priority);
+ ::SetThreadPriority(PlatformThread::CurrentHandle().platform_handle(),
+ desired_priority);
DPLOG_IF(ERROR, !success) << "Failed to set thread priority to "
<< desired_priority;
}
// static
-ThreadPriority PlatformThread::GetThreadPriority(PlatformThreadHandle handle) {
- DCHECK(!handle.is_null());
-
- int priority = ::GetThreadPriority(handle.platform_handle());
+ThreadPriority PlatformThread::GetCurrentThreadPriority() {
+ int priority =
+ ::GetThreadPriority(PlatformThread::CurrentHandle().platform_handle());
switch (priority) {
case THREAD_PRIORITY_LOWEST:
return ThreadPriority::BACKGROUND;
diff --git a/chromium/base/threading/post_task_and_reply_impl.h b/chromium/base/threading/post_task_and_reply_impl.h
index a5b9580e6dc..d21ab78de85 100644
--- a/chromium/base/threading/post_task_and_reply_impl.h
+++ b/chromium/base/threading/post_task_and_reply_impl.h
@@ -25,6 +25,8 @@ namespace internal {
// may want base::WorkerPool.
class PostTaskAndReplyImpl {
public:
+ virtual ~PostTaskAndReplyImpl() = default;
+
// Implementation for TaskRunner::PostTaskAndReply and
// WorkerPool::PostTaskAndReply.
bool PostTaskAndReply(const tracked_objects::Location& from_here,
diff --git a/chromium/base/threading/sequenced_worker_pool.cc b/chromium/base/threading/sequenced_worker_pool.cc
index 7bbca92a2fb..54a6bc8245d 100644
--- a/chromium/base/threading/sequenced_worker_pool.cc
+++ b/chromium/base/threading/sequenced_worker_pool.cc
@@ -619,9 +619,10 @@ bool SequencedWorkerPool::Inner::PostTask(
// The trace_id is used for identifying the task in about:tracing.
sequenced.trace_id = trace_id_++;
- TRACE_EVENT_FLOW_BEGIN0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- "SequencedWorkerPool::PostTask",
- TRACE_ID_MANGLE(GetTaskTraceID(sequenced, static_cast<void*>(this))));
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ "SequencedWorkerPool::Inner::PostTask",
+ TRACE_ID_MANGLE(GetTaskTraceID(sequenced, static_cast<void*>(this))),
+ TRACE_EVENT_FLAG_FLOW_OUT);
sequenced.sequence_task_number = LockedGetNextSequenceTaskNumber();
@@ -754,12 +755,12 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
GetWorkStatus status =
GetWork(&task, &wait_time, &delete_these_outside_lock);
if (status == GET_WORK_FOUND) {
- TRACE_EVENT_FLOW_END0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- "SequencedWorkerPool::PostTask",
- TRACE_ID_MANGLE(GetTaskTraceID(task, static_cast<void*>(this))));
- TRACE_EVENT2("toplevel", "SequencedWorkerPool::ThreadLoop",
- "src_file", task.posted_from.file_name(),
- "src_func", task.posted_from.function_name());
+ TRACE_EVENT_WITH_FLOW2(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ "SequencedWorkerPool::Inner::ThreadLoop",
+ TRACE_ID_MANGLE(GetTaskTraceID(task, static_cast<void*>(this))),
+ TRACE_EVENT_FLAG_FLOW_IN,
+ "src_file", task.posted_from.file_name(),
+ "src_func", task.posted_from.function_name());
int new_thread_id = WillRunWorkerTask(task);
{
AutoUnlock unlock(lock_);
diff --git a/chromium/base/threading/sequenced_worker_pool_unittest.cc b/chromium/base/threading/sequenced_worker_pool_unittest.cc
index 05989a5487a..bf82b110357 100644
--- a/chromium/base/threading/sequenced_worker_pool_unittest.cc
+++ b/chromium/base/threading/sequenced_worker_pool_unittest.cc
@@ -948,6 +948,8 @@ class SequencedWorkerPoolTaskRunnerTestDelegate {
INSTANTIATE_TYPED_TEST_CASE_P(
SequencedWorkerPool, TaskRunnerTest,
SequencedWorkerPoolTaskRunnerTestDelegate);
+INSTANTIATE_TYPED_TEST_CASE_P(SequencedWorkerPool, TaskRunnerAffinityTest,
+ SequencedWorkerPoolTaskRunnerTestDelegate);
class SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate {
public:
@@ -985,6 +987,9 @@ class SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate {
INSTANTIATE_TYPED_TEST_CASE_P(
SequencedWorkerPoolTaskRunner, TaskRunnerTest,
SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate);
+INSTANTIATE_TYPED_TEST_CASE_P(
+ SequencedWorkerPoolTaskRunner, TaskRunnerAffinityTest,
+ SequencedWorkerPoolTaskRunnerWithShutdownBehaviorTestDelegate);
class SequencedWorkerPoolSequencedTaskRunnerTestDelegate {
public:
@@ -1022,10 +1027,17 @@ class SequencedWorkerPoolSequencedTaskRunnerTestDelegate {
INSTANTIATE_TYPED_TEST_CASE_P(
SequencedWorkerPoolSequencedTaskRunner, TaskRunnerTest,
SequencedWorkerPoolSequencedTaskRunnerTestDelegate);
+INSTANTIATE_TYPED_TEST_CASE_P(
+ SequencedWorkerPoolSequencedTaskRunner, TaskRunnerAffinityTest,
+ SequencedWorkerPoolSequencedTaskRunnerTestDelegate);
INSTANTIATE_TYPED_TEST_CASE_P(
SequencedWorkerPoolSequencedTaskRunner, SequencedTaskRunnerTest,
SequencedWorkerPoolSequencedTaskRunnerTestDelegate);
+INSTANTIATE_TYPED_TEST_CASE_P(
+ SequencedWorkerPoolSequencedTaskRunner,
+ SequencedTaskRunnerDelayedTest,
+ SequencedWorkerPoolSequencedTaskRunnerTestDelegate);
} // namespace
diff --git a/chromium/base/threading/thread.cc b/chromium/base/threading/thread.cc
index 7bff24232e2..4b517a1ef26 100644
--- a/chromium/base/threading/thread.cc
+++ b/chromium/base/threading/thread.cc
@@ -62,9 +62,12 @@ Thread::Thread(const std::string& name)
stopping_(false),
running_(false),
thread_(0),
+ id_(kInvalidThreadId),
+ id_event_(true, false),
message_loop_(nullptr),
message_loop_timer_slack_(TIMER_SLACK_NONE),
- name_(name) {
+ name_(name),
+ start_event_(false, false) {
}
Thread::~Thread() {
@@ -87,6 +90,10 @@ bool Thread::StartWithOptions(const Options& options) {
(options.message_loop_type == MessageLoop::TYPE_UI));
#endif
+ // Reset |id_| here to support restarting the thread.
+ id_event_.Reset();
+ id_ = kInvalidThreadId;
+
SetThreadWasQuitProperly(false);
MessageLoop::Type type = options.message_loop_type;
@@ -97,23 +104,16 @@ bool Thread::StartWithOptions(const Options& options) {
scoped_ptr<MessageLoop> message_loop = MessageLoop::CreateUnbound(
type, options.message_pump_factory);
message_loop_ = message_loop.get();
- start_event_.reset(new WaitableEvent(false, false));
+ start_event_.Reset();
// Hold the thread_lock_ while starting a new thread, so that we can make sure
// that thread_ is populated before the newly created thread accesses it.
{
AutoLock lock(thread_lock_);
- bool created;
- if (options.priority == ThreadPriority::NORMAL) {
- created = PlatformThread::Create(options.stack_size, this, &thread_);
- } else {
- created = PlatformThread::CreateWithPriority(options.stack_size, this,
- &thread_, options.priority);
- }
- if (!created) {
+ if (!PlatformThread::CreateWithPriority(options.stack_size, this, &thread_,
+ options.priority)) {
DLOG(ERROR) << "failed to create thread";
message_loop_ = nullptr;
- start_event_.reset();
return false;
}
}
@@ -134,16 +134,17 @@ bool Thread::StartAndWaitForTesting() {
return true;
}
-bool Thread::WaitUntilThreadStarted() {
- if (!start_event_)
+bool Thread::WaitUntilThreadStarted() const {
+ if (!message_loop_)
return false;
base::ThreadRestrictions::ScopedAllowWait allow_wait;
- start_event_->Wait();
+ start_event_.Wait();
return true;
}
void Thread::Stop() {
- if (!start_event_)
+ AutoLock lock(thread_lock_);
+ if (thread_.is_null())
return;
StopSoon();
@@ -154,20 +155,18 @@ void Thread::Stop() {
// the thread exits. Some consumers are abusing the API. Make them stop.
//
PlatformThread::Join(thread_);
+ thread_ = base::PlatformThreadHandle();
- // The thread should NULL message_loop_ on exit.
+ // The thread should nullify message_loop_ on exit.
DCHECK(!message_loop_);
- // The thread no longer needs to be joined.
- start_event_.reset();
-
stopping_ = false;
}
void Thread::StopSoon() {
// We should only be called on the same thread that started us.
- DCHECK_NE(thread_id(), PlatformThread::CurrentId());
+ DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
if (stopping_ || !message_loop_)
return;
@@ -176,9 +175,11 @@ void Thread::StopSoon() {
task_runner()->PostTask(FROM_HERE, base::Bind(&ThreadQuitHelper));
}
-PlatformThreadId Thread::thread_id() const {
- AutoLock lock(thread_lock_);
- return thread_.id();
+PlatformThreadId Thread::GetThreadId() const {
+  // If the thread is created but not started yet, wait for |id_| to be ready.
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ id_event_.Wait();
+ return id_;
}
bool Thread::IsRunning() const {
@@ -211,6 +212,12 @@ bool Thread::GetThreadWasQuitProperly() {
}
void Thread::ThreadMain() {
+ // First, make GetThreadId() available to avoid deadlocks. It could be called
+ // any place in the following thread initialization code.
+ id_ = PlatformThread::CurrentId();
+ DCHECK_NE(kInvalidThreadId, id_);
+ id_event_.Signal();
+
// Complete the initialization of our Thread object.
PlatformThread::SetName(name_.c_str());
ANNOTATE_THREAD_NAME(name_.c_str()); // Tell the name to race detector.
@@ -231,10 +238,6 @@ void Thread::ThreadMain() {
}
#endif
- // Make sure the thread_id() returns current thread.
- // (This internally acquires lock against PlatformThread::Create)
- DCHECK_EQ(thread_id(), PlatformThread::CurrentId());
-
// Let the thread do extra initialization.
Init();
@@ -243,7 +246,7 @@ void Thread::ThreadMain() {
running_ = true;
}
- start_event_->Signal();
+ start_event_.Signal();
Run(message_loop_);
@@ -264,7 +267,7 @@ void Thread::ThreadMain() {
// We can't receive messages anymore.
// (The message loop is destructed at the end of this block)
- message_loop_ = NULL;
+ message_loop_ = nullptr;
}
} // namespace base
diff --git a/chromium/base/threading/thread.h b/chromium/base/threading/thread.h
index 5126491b386..c8a1c803156 100644
--- a/chromium/base/threading/thread.h
+++ b/chromium/base/threading/thread.h
@@ -14,12 +14,12 @@
#include "base/message_loop/timer_slack.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
namespace base {
class MessagePump;
-class WaitableEvent;
// A simple thread abstraction that establishes a MessageLoop on a new thread.
// The consumer uses the MessageLoop of the thread to cause code to execute on
@@ -86,7 +86,7 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// init_com_with_mta(false) and then StartWithOptions() with any message loop
// type other than TYPE_UI.
void init_com_with_mta(bool use_mta) {
- DCHECK(!start_event_);
+ DCHECK(!message_loop_);
com_status_ = use_mta ? MTA : STA;
}
#endif
@@ -118,7 +118,7 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// Blocks until the thread starts running. Called within StartAndWait().
// Note that calling this causes jank on the calling thread, must be used
// carefully for production code.
- bool WaitUntilThreadStarted();
+ bool WaitUntilThreadStarted() const;
// Signals the thread to exit and returns once the thread has exited. After
// this method returns, the Thread object is completely reset and may be used
@@ -148,7 +148,7 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// Returns the message loop for this thread. Use the MessageLoop's
// PostTask methods to execute code on the thread. This only returns
// non-null after a successful call to Start. After Stop has been called,
- // this will return NULL.
+ // this will return nullptr.
//
// NOTE: You must not call this MessageLoop's Quit method directly. Use
// the Thread's Stop method instead.
@@ -156,7 +156,7 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
MessageLoop* message_loop() const { return message_loop_; }
// Returns a TaskRunner for this thread. Use the TaskRunner's PostTask
- // methods to execute code on the thread. Returns NULL if the thread is not
+ // methods to execute code on the thread. Returns nullptr if the thread is not
// running (e.g. before Start or after Stop have been called). Callers can
// hold on to this even after the thread is gone; in this situation, attempts
// to PostTask() will fail.
@@ -170,8 +170,13 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// The native thread handle.
PlatformThreadHandle thread_handle() { return thread_; }
- // The thread ID.
- PlatformThreadId thread_id() const;
+ // Returns the thread ID. Should not be called before the first Start*()
+ // call. Keeps on returning the same ID even after a Stop() call. The next
+ // Start*() call renews the ID.
+ //
+ // WARNING: This function will block if the thread hasn't started yet.
+ //
+ PlatformThreadId GetThreadId() const;
// Returns true if the thread has been started, and not yet stopped.
bool IsRunning() const;
@@ -211,16 +216,22 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
#endif
// If true, we're in the middle of stopping, and shouldn't access
- // |message_loop_|. It may non-NULL and invalid.
+  // |message_loop_|. It may be non-nullptr and invalid.
+  // Should be written only on the thread that created this thread; reads from
+  // other threads may observe a stale value.
bool stopping_;
// True while inside of Run().
bool running_;
- mutable base::Lock running_lock_; // Protects running_.
+ mutable base::Lock running_lock_; // Protects |running_|.
// The thread's handle.
PlatformThreadHandle thread_;
- mutable base::Lock thread_lock_; // Protects thread_.
+ mutable base::Lock thread_lock_; // Protects |thread_|.
+
+ // The thread's id once it has started.
+ PlatformThreadId id_;
+ mutable WaitableEvent id_event_; // Protects |id_|.
// The thread's message loop. Valid only while the thread is alive. Set
// by the created thread.
@@ -233,8 +244,8 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// The name of the thread. Used for debugging purposes.
std::string name_;
- // Non-null if the thread has successfully started.
- scoped_ptr<WaitableEvent> start_event_;
+ // Signaled when the created thread gets ready to use the message loop.
+ mutable WaitableEvent start_event_;
friend void ThreadQuitHelper();
diff --git a/chromium/base/threading/thread_checker.h b/chromium/base/threading/thread_checker.h
index 449247af9dd..1d970f093ed 100644
--- a/chromium/base/threading/thread_checker.h
+++ b/chromium/base/threading/thread_checker.h
@@ -5,23 +5,19 @@
#ifndef BASE_THREADING_THREAD_CHECKER_H_
#define BASE_THREADING_THREAD_CHECKER_H_
+#include "base/logging.h"
+#include "base/threading/thread_checker_impl.h"
+
// Apart from debug builds, we also enable the thread checker in
// builds with DCHECK_ALWAYS_ON so that trybots and waterfall bots
// with this define will get the same level of thread checking as
// debug bots.
-//
-// Note that this does not perfectly match situations where DCHECK is
-// enabled. For example a non-official release build may have
-// DCHECK_ALWAYS_ON undefined (and therefore ThreadChecker would be
-// disabled) but have DCHECKs enabled at runtime.
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
+#if DCHECK_IS_ON()
#define ENABLE_THREAD_CHECKER 1
#else
#define ENABLE_THREAD_CHECKER 0
#endif
-#include "base/threading/thread_checker_impl.h"
-
namespace base {
// Do nothing implementation, for use in release mode.
diff --git a/chromium/base/threading/thread_id_name_manager.h b/chromium/base/threading/thread_id_name_manager.h
index 927d25fe1e8..1ba7e13e57b 100644
--- a/chromium/base/threading/thread_id_name_manager.h
+++ b/chromium/base/threading/thread_id_name_manager.h
@@ -13,10 +13,11 @@
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
-template <typename T> struct DefaultSingletonTraits;
-
namespace base {
+template <typename T>
+struct DefaultSingletonTraits;
+
class BASE_EXPORT ThreadIdNameManager {
public:
static ThreadIdNameManager* GetInstance();
diff --git a/chromium/base/threading/thread_id_name_manager_unittest.cc b/chromium/base/threading/thread_id_name_manager_unittest.cc
index b17c6814120..350dc0fa73b 100644
--- a/chromium/base/threading/thread_id_name_manager_unittest.cc
+++ b/chromium/base/threading/thread_id_name_manager_unittest.cc
@@ -24,8 +24,8 @@ TEST_F(ThreadIdNameManagerTest, AddThreads) {
thread_a.StartAndWaitForTesting();
thread_b.StartAndWaitForTesting();
- EXPECT_STREQ(kAThread, manager->GetName(thread_a.thread_id()));
- EXPECT_STREQ(kBThread, manager->GetName(thread_b.thread_id()));
+ EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
+ EXPECT_STREQ(kBThread, manager->GetName(thread_b.GetThreadId()));
thread_b.Stop();
thread_a.Stop();
@@ -41,10 +41,10 @@ TEST_F(ThreadIdNameManagerTest, RemoveThreads) {
thread_b.StartAndWaitForTesting();
thread_b.Stop();
}
- EXPECT_STREQ(kAThread, manager->GetName(thread_a.thread_id()));
+ EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
thread_a.Stop();
- EXPECT_STREQ("", manager->GetName(thread_a.thread_id()));
+ EXPECT_STREQ("", manager->GetName(thread_a.GetThreadId()));
}
TEST_F(ThreadIdNameManagerTest, RestartThread) {
@@ -52,13 +52,13 @@ TEST_F(ThreadIdNameManagerTest, RestartThread) {
base::Thread thread_a(kAThread);
thread_a.StartAndWaitForTesting();
- base::PlatformThreadId a_id = thread_a.thread_id();
+ base::PlatformThreadId a_id = thread_a.GetThreadId();
EXPECT_STREQ(kAThread, manager->GetName(a_id));
thread_a.Stop();
thread_a.StartAndWaitForTesting();
EXPECT_STREQ("", manager->GetName(a_id));
- EXPECT_STREQ(kAThread, manager->GetName(thread_a.thread_id()));
+ EXPECT_STREQ(kAThread, manager->GetName(thread_a.GetThreadId()));
thread_a.Stop();
}
diff --git a/chromium/base/threading/thread_local_storage.cc b/chromium/base/threading/thread_local_storage.cc
index 0bb396cfd79..701f6a2af9b 100644
--- a/chromium/base/threading/thread_local_storage.cc
+++ b/chromium/base/threading/thread_local_storage.cc
@@ -192,8 +192,8 @@ void PlatformThreadLocalStorage::OnThreadExit(void* value) {
} // namespace internal
ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
- initialized_ = false;
slot_ = 0;
+ base::subtle::Release_Store(&initialized_, 0);
Initialize(destructor);
}
@@ -211,7 +211,7 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
// Setup our destructor.
g_tls_destructors[slot_] = destructor;
- initialized_ = true;
+ base::subtle::Release_Store(&initialized_, 1);
}
void ThreadLocalStorage::StaticSlot::Free() {
@@ -221,7 +221,7 @@ void ThreadLocalStorage::StaticSlot::Free() {
DCHECK_LT(slot_, kThreadLocalStorageSize);
g_tls_destructors[slot_] = NULL;
slot_ = 0;
- initialized_ = false;
+ base::subtle::Release_Store(&initialized_, 0);
}
void* ThreadLocalStorage::StaticSlot::Get() const {
diff --git a/chromium/base/threading/thread_local_storage.h b/chromium/base/threading/thread_local_storage.h
index 50f88685a57..195bff683c3 100644
--- a/chromium/base/threading/thread_local_storage.h
+++ b/chromium/base/threading/thread_local_storage.h
@@ -5,6 +5,7 @@
#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/basictypes.h"
@@ -114,10 +115,12 @@ class BASE_EXPORT ThreadLocalStorage {
// value 'value'.
void Set(void* value);
- bool initialized() const { return initialized_; }
+ bool initialized() const {
+ return base::subtle::Acquire_Load(&initialized_) != 0;
+ }
// The internals of this struct should be considered private.
- bool initialized_;
+ base::subtle::Atomic32 initialized_;
int slot_;
};
diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc
index 871f2dc874c..00306c5ae7d 100644
--- a/chromium/base/threading/thread_restrictions.cc
+++ b/chromium/base/threading/thread_restrictions.cc
@@ -23,7 +23,7 @@ LazyInstance<ThreadLocalBoolean>::Leaky
LazyInstance<ThreadLocalBoolean>::Leaky
g_wait_disallowed = LAZY_INSTANCE_INITIALIZER;
-} // anonymous namespace
+} // namespace
// static
bool ThreadRestrictions::SetIOAllowed(bool allowed) {
@@ -69,7 +69,7 @@ void ThreadRestrictions::DisallowWaiting() {
// static
void ThreadRestrictions::AssertWaitAllowed() {
if (g_wait_disallowed.Get().Get()) {
- LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent"
+ LOG(FATAL) << "Waiting is not allowed to be used on this thread to prevent "
<< "jank and deadlock.";
}
}
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index b6cfa723206..a71cad2795b 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -40,7 +40,6 @@ class BrowserShutdownProfileDumper;
class BrowserTestBase;
class GpuChannelHost;
class NestedMessagePumpAndroid;
-class RenderWidgetResizeHelper;
class ScopedAllowWaitForAndroidLayoutTests;
class ScopedAllowWaitForDebugURL;
class TextInputClientMac;
@@ -68,6 +67,10 @@ namespace remoting {
class AutoThread;
}
+namespace ui {
+class WindowResizeHelperMac;
+}
+
namespace base {
namespace android {
@@ -175,7 +178,6 @@ class BASE_EXPORT ThreadRestrictions {
friend class content::BrowserShutdownProfileDumper;
friend class content::BrowserTestBase;
friend class content::NestedMessagePumpAndroid;
- friend class content::RenderWidgetResizeHelper;
friend class content::ScopedAllowWaitForAndroidLayoutTests;
friend class content::ScopedAllowWaitForDebugURL;
friend class ::HistogramSynchronizer;
@@ -184,6 +186,7 @@ class BASE_EXPORT ThreadRestrictions {
friend class cc::TaskGraphRunner;
friend class mojo::common::WatcherThreadManager;
friend class remoting::AutoThread;
+ friend class ui::WindowResizeHelperMac;
friend class MessagePumpDefault;
friend class SequencedWorkerPool;
friend class SimpleThread;
diff --git a/chromium/base/threading/thread_unittest.cc b/chromium/base/threading/thread_unittest.cc
index 3c3541657eb..94220810c16 100644
--- a/chromium/base/threading/thread_unittest.cc
+++ b/chromium/base/threading/thread_unittest.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
@@ -105,6 +106,15 @@ void RegisterDestructionObserver(
base::MessageLoop::current()->AddDestructionObserver(observer);
}
+// Task that calls GetThreadId() of |thread|, stores the result into |id|, then
+// signals |event|.
+void ReturnThreadId(base::Thread* thread,
+ base::PlatformThreadId* id,
+ base::WaitableEvent* event) {
+ *id = thread->GetThreadId();
+ event->Signal();
+}
+
} // namespace
TEST_F(ThreadTest, Restart) {
@@ -194,6 +204,47 @@ TEST_F(ThreadTest, ThreadName) {
EXPECT_EQ("ThreadName", a.thread_name());
}
+TEST_F(ThreadTest, ThreadId) {
+ Thread a("ThreadId0");
+ Thread b("ThreadId1");
+ a.Start();
+ b.Start();
+
+ // Post a task that calls GetThreadId() on the created thread.
+ base::WaitableEvent event(false, false);
+ base::PlatformThreadId id_from_new_thread;
+ a.task_runner()->PostTask(
+ FROM_HERE, base::Bind(ReturnThreadId, &a, &id_from_new_thread, &event));
+
+ // Call GetThreadId() on the current thread before calling event.Wait() so
+ // that this test can find a race issue with TSAN.
+ base::PlatformThreadId id_from_current_thread = a.GetThreadId();
+
+ // Check if GetThreadId() returns consistent value in both threads.
+ event.Wait();
+ EXPECT_EQ(id_from_current_thread, id_from_new_thread);
+
+ // A started thread should have a valid ID.
+ EXPECT_NE(base::kInvalidThreadId, a.GetThreadId());
+ EXPECT_NE(base::kInvalidThreadId, b.GetThreadId());
+
+ // Each thread should have a different thread ID.
+ EXPECT_NE(a.GetThreadId(), b.GetThreadId());
+}
+
+TEST_F(ThreadTest, ThreadIdWithRestart) {
+ Thread a("ThreadIdWithRestart");
+ base::PlatformThreadId previous_id = base::kInvalidThreadId;
+
+ for (size_t i = 0; i < 16; ++i) {
+ EXPECT_TRUE(a.Start());
+ base::PlatformThreadId current_id = a.GetThreadId();
+ EXPECT_NE(previous_id, current_id);
+ previous_id = current_id;
+ a.Stop();
+ }
+}
+
// Make sure Init() is called after Start() and before
// WaitUntilThreadInitialized() returns.
TEST_F(ThreadTest, SleepInsideInit) {
diff --git a/chromium/base/threading/worker_pool.cc b/chromium/base/threading/worker_pool.cc
index bc016cec0d9..71b1a2bda35 100644
--- a/chromium/base/threading/worker_pool.cc
+++ b/chromium/base/threading/worker_pool.cc
@@ -21,6 +21,7 @@ class PostTaskAndReplyWorkerPool : public internal::PostTaskAndReplyImpl {
explicit PostTaskAndReplyWorkerPool(bool task_is_slow)
: task_is_slow_(task_is_slow) {
}
+ ~PostTaskAndReplyWorkerPool() override = default;
private:
bool PostTask(const tracked_objects::Location& from_here,
diff --git a/chromium/base/threading/worker_pool.h b/chromium/base/threading/worker_pool.h
index a52a41428b3..f8c62357d93 100644
--- a/chromium/base/threading/worker_pool.h
+++ b/chromium/base/threading/worker_pool.h
@@ -22,11 +22,11 @@ class TaskRunner;
// This is a facility that runs tasks that don't require a specific thread or
// a message loop.
//
-// WARNING: This shouldn't be used unless absolutely necessary. We don't wait
-// for the worker pool threads to finish on shutdown, so the tasks running
-// inside the pool must be extremely careful about other objects they access
-// (MessageLoops, Singletons, etc). During shutdown these object may no longer
-// exist.
+// WARNING: This shouldn't be used unless absolutely necessary. Typically
+// (without calling ShutDownCleanly()), we don't wait for the worker pool
+// threads to finish on shutdown, so the tasks running inside the pool must be
+// extremely careful about other objects they access (MessageLoops, Singletons,
+// etc). During shutdown these objects may no longer exist.
class BASE_EXPORT WorkerPool {
public:
// This function posts |task| to run on a worker thread. |task_is_slow|
@@ -53,6 +53,13 @@ class BASE_EXPORT WorkerPool {
// Get a TaskRunner wrapper which posts to the WorkerPool using the given
// |task_is_slow| behavior.
static const scoped_refptr<TaskRunner>& GetTaskRunner(bool task_is_slow);
+
+ // Blocks until all worker threads quit cleanly. Please note that it ensures
+ // that no worker threads are running after the method returns, but it doesn't
+ // guarantee to process all queued pending tasks. This method may take a long
+ // time. Please don't use it unless absolutely necessary, e.g., when we want
+ // to unload the library containing the worker pool before process shutdown.
+ static void ShutDownCleanly();
};
} // namespace base
diff --git a/chromium/base/threading/worker_pool_posix.cc b/chromium/base/threading/worker_pool_posix.cc
index 349b5d751c1..231aa68deaf 100644
--- a/chromium/base/threading/worker_pool_posix.cc
+++ b/chromium/base/threading/worker_pool_posix.cc
@@ -22,10 +22,10 @@ namespace base {
namespace {
-base::LazyInstance<ThreadLocalBoolean>::Leaky
- g_worker_pool_running_on_this_thread = LAZY_INSTANCE_INITIALIZER;
+LazyInstance<ThreadLocalBoolean>::Leaky g_worker_pool_running_on_this_thread =
+ LAZY_INSTANCE_INITIALIZER;
-const int kIdleSecondsBeforeExit = 10 * 60;
+const int64 kIdleSecondsBeforeExit = 10 * 60;
class WorkerPoolImpl {
public:
@@ -33,49 +33,55 @@ class WorkerPoolImpl {
~WorkerPoolImpl();
void PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow);
+ const Closure& task,
+ bool task_is_slow);
+
+ void ShutDownCleanly();
private:
- scoped_refptr<base::PosixDynamicThreadPool> pool_;
+ scoped_refptr<PosixDynamicThreadPool> pool_;
};
WorkerPoolImpl::WorkerPoolImpl()
- : pool_(new base::PosixDynamicThreadPool("WorkerPool",
- kIdleSecondsBeforeExit)) {
+ : pool_(new PosixDynamicThreadPool(
+ "WorkerPool",
+ TimeDelta::FromSeconds(kIdleSecondsBeforeExit))) {
}
WorkerPoolImpl::~WorkerPoolImpl() {
- pool_->Terminate();
+ pool_->Terminate(false);
}
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow) {
+ const Closure& task,
+ bool task_is_slow) {
pool_->PostTask(from_here, task);
}
-base::LazyInstance<WorkerPoolImpl> g_lazy_worker_pool =
- LAZY_INSTANCE_INITIALIZER;
+void WorkerPoolImpl::ShutDownCleanly() {
+ pool_->Terminate(true);
+}
+
+LazyInstance<WorkerPoolImpl> g_lazy_worker_pool = LAZY_INSTANCE_INITIALIZER;
class WorkerThread : public PlatformThread::Delegate {
public:
- WorkerThread(const std::string& name_prefix,
- base::PosixDynamicThreadPool* pool)
- : name_prefix_(name_prefix),
- pool_(pool) {}
+ WorkerThread(const std::string& name_prefix, PosixDynamicThreadPool* pool)
+ : name_prefix_(name_prefix), pool_(pool) {}
void ThreadMain() override;
private:
const std::string name_prefix_;
- scoped_refptr<base::PosixDynamicThreadPool> pool_;
+ scoped_refptr<PosixDynamicThreadPool> pool_;
DISALLOW_COPY_AND_ASSIGN(WorkerThread);
};
void WorkerThread::ThreadMain() {
g_worker_pool_running_on_this_thread.Get().Set(true);
- const std::string name = base::StringPrintf(
- "%s/%d", name_prefix_.c_str(), PlatformThread::CurrentId());
+ const std::string name =
+ StringPrintf("%s/%d", name_prefix_.c_str(), PlatformThread::CurrentId());
// Note |name.c_str()| must remain valid for for the whole life of the thread.
PlatformThread::SetName(name);
@@ -96,7 +102,7 @@ void WorkerThread::ThreadMain() {
pending_task.birth_tally, pending_task.time_posted, stopwatch);
}
- // The WorkerThread is non-joinable, so it deletes itself.
+ pool_->NotifyWorkerIsGoingAway(PlatformThread::CurrentHandle());
delete this;
}
@@ -104,7 +110,8 @@ void WorkerThread::ThreadMain() {
// static
bool WorkerPool::PostTask(const tracked_objects::Location& from_here,
- const base::Closure& task, bool task_is_slow) {
+ const Closure& task,
+ bool task_is_slow) {
g_lazy_worker_pool.Pointer()->PostTask(from_here, task, task_is_slow);
return true;
}
@@ -114,74 +121,82 @@ bool WorkerPool::RunsTasksOnCurrentThread() {
return g_worker_pool_running_on_this_thread.Get().Get();
}
+// static
+void WorkerPool::ShutDownCleanly() {
+ g_lazy_worker_pool.Pointer()->ShutDownCleanly();
+}
+
PosixDynamicThreadPool::PosixDynamicThreadPool(const std::string& name_prefix,
- int idle_seconds_before_exit)
+ TimeDelta idle_time_before_exit)
: name_prefix_(name_prefix),
- idle_seconds_before_exit_(idle_seconds_before_exit),
+ idle_time_before_exit_(idle_time_before_exit),
pending_tasks_available_cv_(&lock_),
num_idle_threads_(0),
- terminated_(false) {}
+ has_pending_cleanup_task_(false),
+ terminated_(false) {
+}
PosixDynamicThreadPool::~PosixDynamicThreadPool() {
while (!pending_tasks_.empty())
pending_tasks_.pop();
}
-void PosixDynamicThreadPool::Terminate() {
+void PosixDynamicThreadPool::Terminate(bool blocking) {
+ std::vector<PlatformThreadHandle> threads_to_cleanup;
+ std::vector<PlatformThreadHandle> worker_threads;
{
AutoLock locked(lock_);
- DCHECK(!terminated_) << "Thread pool is already terminated.";
+ if (terminated_)
+ return;
terminated_ = true;
+
+ threads_to_cleanup.swap(threads_to_cleanup_);
+ worker_threads.swap(worker_threads_);
}
pending_tasks_available_cv_.Broadcast();
+
+ if (blocking) {
+ for (const auto& item : threads_to_cleanup)
+ PlatformThread::Join(item);
+
+ for (const auto& item : worker_threads)
+ PlatformThread::Join(item);
+
+ // No need to take the lock. No one else should be accessing these members.
+ DCHECK_EQ(0u, num_idle_threads_);
+ // The following members should not have new elements added after
+ // |terminated_| is set to true.
+ DCHECK(threads_to_cleanup_.empty());
+ DCHECK(worker_threads_.empty());
+ }
}
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
- const base::Closure& task) {
+ const Closure& task) {
PendingTask pending_task(from_here, task);
- AddTask(&pending_task);
-}
-
-void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
AutoLock locked(lock_);
- DCHECK(!terminated_) <<
- "This thread pool is already terminated. Do not post new tasks.";
-
- pending_tasks_.push(*pending_task);
- pending_task->task.Reset();
-
- // We have enough worker threads.
- if (static_cast<size_t>(num_idle_threads_) >= pending_tasks_.size()) {
- pending_tasks_available_cv_.Signal();
- } else {
- // The new PlatformThread will take ownership of the WorkerThread object,
- // which will delete itself on exit.
- WorkerThread* worker =
- new WorkerThread(name_prefix_, this);
- PlatformThread::CreateNonJoinable(0, worker);
- }
+ AddTaskNoLock(&pending_task);
}
PendingTask PosixDynamicThreadPool::WaitForTask() {
AutoLock locked(lock_);
if (terminated_)
- return PendingTask(FROM_HERE, base::Closure());
+ return PendingTask(FROM_HERE, Closure());
if (pending_tasks_.empty()) { // No work available, wait for work.
num_idle_threads_++;
- if (num_idle_threads_cv_.get())
- num_idle_threads_cv_->Signal();
- pending_tasks_available_cv_.TimedWait(
- TimeDelta::FromSeconds(idle_seconds_before_exit_));
+ if (num_threads_cv_)
+ num_threads_cv_->Broadcast();
+ pending_tasks_available_cv_.TimedWait(idle_time_before_exit_);
num_idle_threads_--;
- if (num_idle_threads_cv_.get())
- num_idle_threads_cv_->Signal();
+ if (num_threads_cv_)
+ num_threads_cv_->Broadcast();
if (pending_tasks_.empty()) {
- // We waited for work, but there's still no work. Return NULL to signal
- // the thread to terminate.
- return PendingTask(FROM_HERE, base::Closure());
+ // We waited for work, but there's still no work. Return an empty task to
+ // signal the thread to terminate.
+ return PendingTask(FROM_HERE, Closure());
}
}
@@ -190,4 +205,72 @@ PendingTask PosixDynamicThreadPool::WaitForTask() {
return pending_task;
}
+void PosixDynamicThreadPool::NotifyWorkerIsGoingAway(
+ PlatformThreadHandle worker) {
+ AutoLock locked(lock_);
+ if (terminated_)
+ return;
+
+ auto new_end = std::remove_if(worker_threads_.begin(), worker_threads_.end(),
+ [worker](PlatformThreadHandle handle) {
+ return handle.is_equal(worker);
+ });
+ DCHECK_EQ(1, worker_threads_.end() - new_end);
+ worker_threads_.erase(new_end, worker_threads_.end());
+
+ threads_to_cleanup_.push_back(worker);
+
+ if (num_threads_cv_)
+ num_threads_cv_->Broadcast();
+
+ if (!has_pending_cleanup_task_) {
+ has_pending_cleanup_task_ = true;
+ PendingTask pending_task(
+ FROM_HERE,
+ base::Bind(&PosixDynamicThreadPool::CleanUpThreads, Unretained(this)));
+ AddTaskNoLock(&pending_task);
+ }
+}
+
+void PosixDynamicThreadPool::AddTaskNoLock(PendingTask* pending_task) {
+ lock_.AssertAcquired();
+
+ if (terminated_) {
+ LOG(WARNING)
+ << "This thread pool is already terminated. Do not post new tasks.";
+ return;
+ }
+
+ pending_tasks_.push(*pending_task);
+ pending_task->task.Reset();
+
+ // We have enough worker threads.
+ if (num_idle_threads_ >=
+ pending_tasks_.size() - (has_pending_cleanup_task_ ? 1 : 0)) {
+ pending_tasks_available_cv_.Signal();
+ } else {
+ // The new PlatformThread will take ownership of the WorkerThread object,
+ // which will delete itself on exit.
+ WorkerThread* worker = new WorkerThread(name_prefix_, this);
+ PlatformThreadHandle handle;
+ PlatformThread::Create(0, worker, &handle);
+ worker_threads_.push_back(handle);
+
+ if (num_threads_cv_)
+ num_threads_cv_->Broadcast();
+ }
+}
+
+void PosixDynamicThreadPool::CleanUpThreads() {
+ std::vector<PlatformThreadHandle> threads_to_cleanup;
+ {
+ AutoLock locked(lock_);
+ DCHECK(has_pending_cleanup_task_);
+ has_pending_cleanup_task_ = false;
+ threads_to_cleanup.swap(threads_to_cleanup_);
+ }
+ for (const auto& item : threads_to_cleanup)
+ PlatformThread::Join(item);
+}
+
} // namespace base
diff --git a/chromium/base/threading/worker_pool_posix.h b/chromium/base/threading/worker_pool_posix.h
index dd0ffb656fa..d3c4a8ff29a 100644
--- a/chromium/base/threading/worker_pool_posix.h
+++ b/chromium/base/threading/worker_pool_posix.h
@@ -5,12 +5,12 @@
// The thread pool used in the POSIX implementation of WorkerPool dynamically
// adds threads as necessary to handle all tasks. It keeps old threads around
// for a period of time to allow them to be reused. After this waiting period,
-// the threads exit. This thread pool uses non-joinable threads, therefore
-// worker threads are not joined during process shutdown. This means that
-// potentially long running tasks (such as DNS lookup) do not block process
-// shutdown, but also means that process shutdown may "leak" objects. Note that
-// although PosixDynamicThreadPool spawns the worker threads and manages the
-// task queue, it does not own the worker threads. The worker threads ask the
+// the threads exit. Unless blocking termination is requested, worker threads
+// are not joined during process shutdown. This means that potentially long
+// running tasks (such as DNS lookup) do not block process shutdown, but also
+// means that process shutdown may "leak" objects. Note that although
+// PosixDynamicThreadPool spawns the worker threads and manages the task queue,
+// it does not own the worker threads. The worker threads ask the
// PosixDynamicThreadPool for work and eventually clean themselves up. The
// worker threads all maintain scoped_refptrs to the PosixDynamicThreadPool
// instance, which prevents PosixDynamicThreadPool from disappearing before all
@@ -26,6 +26,7 @@
#include <queue>
#include <string>
+#include <vector>
#include "base/basictypes.h"
#include "base/callback_forward.h"
@@ -36,6 +37,7 @@
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
#include "base/tracked_objects.h"
class Task;
@@ -48,34 +50,44 @@ class BASE_EXPORT PosixDynamicThreadPool
class PosixDynamicThreadPoolPeer;
// All worker threads will share the same |name_prefix|. They will exit after
- // |idle_seconds_before_exit|.
+ // |idle_time_before_exit|.
PosixDynamicThreadPool(const std::string& name_prefix,
- int idle_seconds_before_exit);
+ TimeDelta idle_time_before_exit);
// Indicates that the thread pool is going away. Stops handing out tasks to
- // worker threads. Wakes up all the idle threads to let them exit.
- void Terminate();
+ // worker threads. Wakes up all the idle threads to let them exit. If
+ // |blocking| is set to true, the call returns after all worker threads have
+ // quit.
+ // The second and subsequent calls to this method are ignored, regardless of
+ // the value of |blocking|.
+ void Terminate(bool blocking);
// Adds |task| to the thread pool.
void PostTask(const tracked_objects::Location& from_here,
const Closure& task);
- // Worker thread method to wait for up to |idle_seconds_before_exit| for more
- // work from the thread pool. Returns NULL if no work is available.
+ // Worker thread method to wait for up to |idle_time_before_exit| for more
+ // work from the thread pool. Returns an empty task if no work is available.
PendingTask WaitForTask();
+ // Marks |worker| as dead and enqueues a cleanup task to join dead worker
+ // threads. Unlike tasks enqueued by PostTask(), cleanup tasks never cause new
+ // worker threads to be created.
+ void NotifyWorkerIsGoingAway(PlatformThreadHandle worker);
+
private:
friend class RefCountedThreadSafe<PosixDynamicThreadPool>;
- friend class PosixDynamicThreadPoolPeer;
~PosixDynamicThreadPool();
// Adds pending_task to the thread pool. This function will clear
// |pending_task->task|.
- void AddTask(PendingTask* pending_task);
+ void AddTaskNoLock(PendingTask* pending_task);
+
+ void CleanUpThreads();
const std::string name_prefix_;
- const int idle_seconds_before_exit_;
+ const TimeDelta idle_time_before_exit_;
Lock lock_; // Protects all the variables below.
@@ -83,12 +95,20 @@ class BASE_EXPORT PosixDynamicThreadPool
// Also used for Broadcast()'ing to worker threads to let them know the pool
// is being deleted and they can exit.
ConditionVariable pending_tasks_available_cv_;
- int num_idle_threads_;
- TaskQueue pending_tasks_;
+ size_t num_idle_threads_;
+ bool has_pending_cleanup_task_;
+ std::queue<PendingTask> pending_tasks_;
bool terminated_;
- // Only used for tests to ensure correct thread ordering. It will always be
+
+ std::vector<PlatformThreadHandle> threads_to_cleanup_;
+ std::vector<PlatformThreadHandle> worker_threads_;
+
+ // Signaled when idle thread count or living thread count is changed. Please
+ // note that it won't be signaled when Terminate() is called.
+ //
+ // Only used for tests to ensure correct thread ordering. It will always be
// NULL in non-test code.
- scoped_ptr<ConditionVariable> num_idle_threads_cv_;
+ scoped_ptr<ConditionVariable> num_threads_cv_;
DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPool);
};
diff --git a/chromium/base/threading/worker_pool_posix_unittest.cc b/chromium/base/threading/worker_pool_posix_unittest.cc
index 354a99c538d..8d2368f37f9 100644
--- a/chromium/base/threading/worker_pool_posix_unittest.cc
+++ b/chromium/base/threading/worker_pool_posix_unittest.cc
@@ -10,8 +10,9 @@
#include "base/callback.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
-#include "base/threading/platform_thread.h"
#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -26,15 +27,17 @@ class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer {
ConditionVariable* pending_tasks_available_cv() {
return &pool_->pending_tasks_available_cv_;
}
- const std::queue<PendingTask>& pending_tasks() const {
- return pool_->pending_tasks_;
+ size_t num_pending_tasks() const { return pool_->pending_tasks_.size(); }
+ size_t num_idle_threads() const { return pool_->num_idle_threads_; }
+ ConditionVariable* num_threads_cv() { return pool_->num_threads_cv_.get(); }
+ void set_num_threads_cv(ConditionVariable* cv) {
+ pool_->num_threads_cv_.reset(cv);
}
- int num_idle_threads() const { return pool_->num_idle_threads_; }
- ConditionVariable* num_idle_threads_cv() {
- return pool_->num_idle_threads_cv_.get();
+ const std::vector<PlatformThreadHandle>& threads_to_cleanup() const {
+ return pool_->threads_to_cleanup_;
}
- void set_num_idle_threads_cv(ConditionVariable* cv) {
- pool_->num_idle_threads_cv_.reset(cv);
+ const std::vector<PlatformThreadHandle>& worker_threads() const {
+ return pool_->worker_threads_;
}
private:
@@ -45,6 +48,8 @@ class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer {
namespace {
+const int64 kDefaultIdleSecondsBeforeExit = 60 * 60;
+
// IncrementingTask's main purpose is to increment a counter. It also updates a
// set of unique thread ids, and signals a ConditionVariable on completion.
// Note that since it does not block, there is no way to control the number of
@@ -56,10 +61,10 @@ void IncrementingTask(Lock* counter_lock,
Lock* unique_threads_lock,
std::set<PlatformThreadId>* unique_threads) {
{
- base::AutoLock locked(*unique_threads_lock);
+ AutoLock locked(*unique_threads_lock);
unique_threads->insert(PlatformThread::CurrentId());
}
- base::AutoLock locked(*counter_lock);
+ AutoLock locked(*counter_lock);
(*counter)++;
}
@@ -73,12 +78,12 @@ struct BlockingIncrementingTaskArgs {
Lock* num_waiting_to_start_lock;
int* num_waiting_to_start;
ConditionVariable* num_waiting_to_start_cv;
- base::WaitableEvent* start;
+ WaitableEvent* start;
};
void BlockingIncrementingTask(const BlockingIncrementingTaskArgs& args) {
{
- base::AutoLock num_waiting_to_start_locked(*args.num_waiting_to_start_lock);
+ AutoLock num_waiting_to_start_locked(*args.num_waiting_to_start_lock);
(*args.num_waiting_to_start)++;
}
args.num_waiting_to_start_cv->Signal();
@@ -90,52 +95,62 @@ void BlockingIncrementingTask(const BlockingIncrementingTaskArgs& args) {
class PosixDynamicThreadPoolTest : public testing::Test {
protected:
PosixDynamicThreadPoolTest()
- : pool_(new base::PosixDynamicThreadPool("dynamic_pool", 60*60)),
- peer_(pool_.get()),
- counter_(0),
+ : counter_(0),
num_waiting_to_start_(0),
num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
start_(true, false) {}
- void SetUp() override {
- peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
- }
-
void TearDown() override {
// Wake up the idle threads so they can terminate.
- if (pool_.get()) pool_->Terminate();
+ if (pool_.get())
+ pool_->Terminate(false);
+ }
+
+ void Initialize(TimeDelta idle_time_before_exit) {
+ pool_ = new PosixDynamicThreadPool("dynamic_pool", idle_time_before_exit);
+ peer_.reset(
+ new PosixDynamicThreadPool::PosixDynamicThreadPoolPeer(pool_.get()));
+ peer_->set_num_threads_cv(new ConditionVariable(peer_->lock()));
}
void WaitForTasksToStart(int num_tasks) {
- base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
+ AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
while (num_waiting_to_start_ < num_tasks) {
num_waiting_to_start_cv_.Wait();
}
}
- void WaitForIdleThreads(int num_idle_threads) {
- base::AutoLock pool_locked(*peer_.lock());
- while (peer_.num_idle_threads() < num_idle_threads) {
- peer_.num_idle_threads_cv()->Wait();
+ void WaitForIdleThreads(size_t num_idle_threads) {
+ AutoLock pool_locked(*peer_->lock());
+ while (peer_->num_idle_threads() != num_idle_threads) {
+ peer_->num_threads_cv()->Wait();
+ }
+ }
+
+ void WaitForLivingThreads(int num_living_threads) {
+ AutoLock pool_locked(*peer_->lock());
+ while (static_cast<int>(peer_->worker_threads().size()) !=
+ num_living_threads) {
+ peer_->num_threads_cv()->Wait();
}
}
- base::Closure CreateNewIncrementingTaskCallback() {
- return base::Bind(&IncrementingTask, &counter_lock_, &counter_,
- &unique_threads_lock_, &unique_threads_);
+ Closure CreateNewIncrementingTaskCallback() {
+ return Bind(&IncrementingTask, &counter_lock_, &counter_,
+ &unique_threads_lock_, &unique_threads_);
}
- base::Closure CreateNewBlockingIncrementingTaskCallback() {
+ Closure CreateNewBlockingIncrementingTaskCallback() {
BlockingIncrementingTaskArgs args = {
&counter_lock_, &counter_, &unique_threads_lock_, &unique_threads_,
&num_waiting_to_start_lock_, &num_waiting_to_start_,
&num_waiting_to_start_cv_, &start_
};
- return base::Bind(&BlockingIncrementingTask, args);
+ return Bind(&BlockingIncrementingTask, args);
}
- scoped_refptr<base::PosixDynamicThreadPool> pool_;
- base::PosixDynamicThreadPool::PosixDynamicThreadPoolPeer peer_;
+ scoped_refptr<PosixDynamicThreadPool> pool_;
+ scoped_ptr<PosixDynamicThreadPool::PosixDynamicThreadPoolPeer> peer_;
Lock counter_lock_;
int counter_;
Lock unique_threads_lock_;
@@ -143,15 +158,17 @@ class PosixDynamicThreadPoolTest : public testing::Test {
Lock num_waiting_to_start_lock_;
int num_waiting_to_start_;
ConditionVariable num_waiting_to_start_cv_;
- base::WaitableEvent start_;
+ WaitableEvent start_;
};
} // namespace
TEST_F(PosixDynamicThreadPoolTest, Basic) {
- EXPECT_EQ(0, peer_.num_idle_threads());
+ Initialize(TimeDelta::FromSeconds(kDefaultIdleSecondsBeforeExit));
+
+ EXPECT_EQ(0U, peer_->num_idle_threads());
EXPECT_EQ(0U, unique_threads_.size());
- EXPECT_EQ(0U, peer_.pending_tasks().size());
+ EXPECT_EQ(0U, peer_->num_pending_tasks());
// Add one task and wait for it to be completed.
pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
@@ -164,6 +181,8 @@ TEST_F(PosixDynamicThreadPoolTest, Basic) {
}
TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) {
+ Initialize(TimeDelta::FromSeconds(kDefaultIdleSecondsBeforeExit));
+
// Add one task and wait for it to be completed.
pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
@@ -178,11 +197,13 @@ TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) {
WaitForIdleThreads(2);
EXPECT_EQ(2U, unique_threads_.size());
- EXPECT_EQ(2, peer_.num_idle_threads());
+ EXPECT_EQ(2U, peer_->num_idle_threads());
EXPECT_EQ(3, counter_);
}
TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) {
+ Initialize(TimeDelta::FromSeconds(kDefaultIdleSecondsBeforeExit));
+
// Add two blocking tasks.
pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
@@ -194,12 +215,14 @@ TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) {
WaitForIdleThreads(2);
EXPECT_EQ(2U, unique_threads_.size());
- EXPECT_EQ(2, peer_.num_idle_threads()) << "Existing threads are now idle.";
+ EXPECT_EQ(2U, peer_->num_idle_threads()) << "Existing threads are now idle.";
EXPECT_EQ(2, counter_);
}
TEST_F(PosixDynamicThreadPoolTest, Complex) {
- // Add two non blocking tasks and wait for them to finish.
+ Initialize(TimeDelta::FromSeconds(kDefaultIdleSecondsBeforeExit));
+
+  // Add one non-blocking task and wait for it to finish.
pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
WaitForIdleThreads(1);
@@ -214,15 +237,15 @@ TEST_F(PosixDynamicThreadPoolTest, Complex) {
WaitForIdleThreads(2);
EXPECT_EQ(3, counter_);
- EXPECT_EQ(2, peer_.num_idle_threads());
+ EXPECT_EQ(2U, peer_->num_idle_threads());
EXPECT_EQ(2U, unique_threads_.size());
// Wake up all idle threads so they can exit.
{
- base::AutoLock locked(*peer_.lock());
- while (peer_.num_idle_threads() > 0) {
- peer_.pending_tasks_available_cv()->Signal();
- peer_.num_idle_threads_cv()->Wait();
+ AutoLock locked(*peer_->lock());
+ while (peer_->worker_threads().size() > 0) {
+ peer_->pending_tasks_available_cv()->Signal();
+ peer_->num_threads_cv()->Wait();
}
}
@@ -246,8 +269,77 @@ TEST_F(PosixDynamicThreadPoolTest, Complex) {
// be either 2 or 3 unique thread IDs in the set at this stage in the test.
EXPECT_TRUE(unique_threads_.size() >= 2 && unique_threads_.size() <= 3)
<< "unique_threads_.size() = " << unique_threads_.size();
- EXPECT_EQ(1, peer_.num_idle_threads());
+ EXPECT_EQ(1U, peer_->num_idle_threads());
EXPECT_EQ(4, counter_);
}
+TEST_F(PosixDynamicThreadPoolTest, NoNewThreadForCleanup) {
+ // Let worker threads quit quickly after they are idle.
+ Initialize(TimeDelta::FromMilliseconds(1));
+
+ for (size_t i = 0; i < 2; ++i) {
+ // This will create a worker thread.
+ pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTaskCallback());
+
+ WaitForTasksToStart(1);
+
+ PlatformThreadHandle worker;
+ {
+ AutoLock locked(*peer_->lock());
+ ASSERT_EQ(1u, peer_->worker_threads().size());
+ worker = peer_->worker_threads()[0];
+ }
+
+ start_.Signal();
+
+ // Wait for the worker thread to quit.
+ WaitForLivingThreads(0);
+
+ {
+ AutoLock locked(*peer_->lock());
+ // The thread that just quit is recorded for cleanup. But we don't create
+ // a worker thread just for doing that.
+ ASSERT_EQ(1u, peer_->threads_to_cleanup().size());
+ EXPECT_TRUE(worker.is_equal(peer_->threads_to_cleanup()[0]));
+ EXPECT_TRUE(peer_->worker_threads().empty());
+ }
+ }
+
+ pool_->Terminate(true);
+
+ {
+ AutoLock locked(*peer_->lock());
+ EXPECT_TRUE(peer_->threads_to_cleanup().empty());
+ EXPECT_TRUE(peer_->worker_threads().empty());
+ }
+}
+
+TEST_F(PosixDynamicThreadPoolTest, BlockingTerminate) {
+ // Let worker threads quit quickly after they are idle.
+ Initialize(TimeDelta::FromMilliseconds(3));
+
+ for (size_t i = 0; i < 5; ++i) {
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(i));
+ for (size_t j = 0; j < 50; ++j)
+ pool_->PostTask(FROM_HERE, CreateNewIncrementingTaskCallback());
+ }
+
+ pool_->Terminate(true);
+
+ {
+ AutoLock locked(*peer_->lock());
+ EXPECT_TRUE(peer_->threads_to_cleanup().empty());
+ EXPECT_TRUE(peer_->worker_threads().empty());
+ }
+
+ int counter = counter_;
+ EXPECT_GE(5 * 50, counter);
+ EXPECT_GE(5 * 50u, unique_threads_.size());
+
+ // Make sure that no threads are still running and trying to modify
+ // |counter_|.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
+ EXPECT_EQ(counter, counter_);
+}
+
} // namespace base
diff --git a/chromium/base/threading/worker_pool_win.cc b/chromium/base/threading/worker_pool_win.cc
index 1b0ade5e244..563702be15b 100644
--- a/chromium/base/threading/worker_pool_win.cc
+++ b/chromium/base/threading/worker_pool_win.cc
@@ -70,4 +70,10 @@ bool WorkerPool::RunsTasksOnCurrentThread() {
return g_worker_pool_running_on_this_thread.Get().Get();
}
+// static
+void WorkerPool::ShutDownCleanly() {
+ // TODO(yzshen): implement it.
+ NOTIMPLEMENTED();
+}
+
} // namespace base
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index 8cbb382bb96..10ffcc60665 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -144,7 +144,7 @@ Time Time::FromTimeT(time_t tt) {
return Time(); // Preserve 0 so we can tell it doesn't exist.
if (tt == std::numeric_limits<time_t>::max())
return Max();
- return Time((tt * kMicrosecondsPerSecond) + kTimeTToMicrosecondsOffset);
+ return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSeconds(tt);
}
time_t Time::ToTimeT() const {
@@ -166,11 +166,7 @@ time_t Time::ToTimeT() const {
Time Time::FromDoubleT(double dt) {
if (dt == 0 || std::isnan(dt))
return Time(); // Preserve 0 so we can tell it doesn't exist.
- if (dt == std::numeric_limits<double>::infinity())
- return Max();
- return Time(static_cast<int64>((dt *
- static_cast<double>(kMicrosecondsPerSecond)) +
- kTimeTToMicrosecondsOffset));
+ return Time(kTimeTToMicrosecondsOffset) + TimeDelta::FromSecondsD(dt);
}
double Time::ToDoubleT() const {
@@ -197,10 +193,8 @@ Time Time::FromTimeSpec(const timespec& ts) {
Time Time::FromJsTime(double ms_since_epoch) {
// The epoch is a valid time, so this constructor doesn't interpret
// 0 as the null time.
- if (ms_since_epoch == std::numeric_limits<double>::infinity())
- return Max();
- return Time(static_cast<int64>(ms_since_epoch * kMicrosecondsPerMillisecond) +
- kTimeTToMicrosecondsOffset);
+ return Time(kTimeTToMicrosecondsOffset) +
+ TimeDelta::FromMillisecondsD(ms_since_epoch);
}
double Time::ToJsTime() const {
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index a02fbeb01ff..e0a6ea37eaa 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -264,6 +264,9 @@ class BASE_EXPORT TimeDelta {
explicit TimeDelta(int64 delta_us) : delta_(delta_us) {
}
+ // Private method to build a delta from a double.
+ static TimeDelta FromDouble(double value);
+
// Delta in microseconds.
int64 delta_;
};
@@ -597,7 +600,6 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// static
inline TimeDelta TimeDelta::FromDays(int days) {
- // Preserve max to prevent overflow.
if (days == std::numeric_limits<int>::max())
return Max();
return TimeDelta(days * Time::kMicrosecondsPerDay);
@@ -605,7 +607,6 @@ inline TimeDelta TimeDelta::FromDays(int days) {
// static
inline TimeDelta TimeDelta::FromHours(int hours) {
- // Preserve max to prevent overflow.
if (hours == std::numeric_limits<int>::max())
return Max();
return TimeDelta(hours * Time::kMicrosecondsPerHour);
@@ -613,7 +614,6 @@ inline TimeDelta TimeDelta::FromHours(int hours) {
// static
inline TimeDelta TimeDelta::FromMinutes(int minutes) {
- // Preserve max to prevent overflow.
if (minutes == std::numeric_limits<int>::max())
return Max();
return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
@@ -621,44 +621,40 @@ inline TimeDelta TimeDelta::FromMinutes(int minutes) {
// static
inline TimeDelta TimeDelta::FromSeconds(int64 secs) {
- // Preserve max to prevent overflow.
- if (secs == std::numeric_limits<int64>::max())
- return Max();
- return TimeDelta(secs * Time::kMicrosecondsPerSecond);
+ return TimeDelta(secs) * Time::kMicrosecondsPerSecond;
}
// static
inline TimeDelta TimeDelta::FromMilliseconds(int64 ms) {
- // Preserve max to prevent overflow.
- if (ms == std::numeric_limits<int64>::max())
- return Max();
- return TimeDelta(ms * Time::kMicrosecondsPerMillisecond);
+ return TimeDelta(ms) * Time::kMicrosecondsPerMillisecond;
}
// static
inline TimeDelta TimeDelta::FromSecondsD(double secs) {
- // Preserve max to prevent overflow.
- if (secs == std::numeric_limits<double>::infinity())
- return Max();
- return TimeDelta(static_cast<int64>(secs * Time::kMicrosecondsPerSecond));
+ return FromDouble(secs * Time::kMicrosecondsPerSecond);
}
// static
inline TimeDelta TimeDelta::FromMillisecondsD(double ms) {
- // Preserve max to prevent overflow.
- if (ms == std::numeric_limits<double>::infinity())
- return Max();
- return TimeDelta(static_cast<int64>(ms * Time::kMicrosecondsPerMillisecond));
+ return FromDouble(ms * Time::kMicrosecondsPerMillisecond);
}
// static
inline TimeDelta TimeDelta::FromMicroseconds(int64 us) {
- // Preserve max to prevent overflow.
- if (us == std::numeric_limits<int64>::max())
- return Max();
return TimeDelta(us);
}
+// static
+inline TimeDelta TimeDelta::FromDouble(double value) {
+ double max_magnitude = std::numeric_limits<int64>::max();
+ TimeDelta delta = TimeDelta(static_cast<int64>(value));
+ if (value > max_magnitude)
+ delta = Max();
+ else if (value < -max_magnitude)
+ delta = -Max();
+ return delta;
+}
+
// For logging use only.
BASE_EXPORT std::ostream& operator<<(std::ostream& os, Time time);
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index b7e05b786b4..512fc37d3fc 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -484,52 +484,6 @@ TEST_F(TimeTest, ExplodeBeforeUnixEpoch) {
EXPECT_EQ(1, exploded.millisecond);
}
-TEST_F(TimeTest, TimeDeltaMax) {
- TimeDelta max = TimeDelta::Max();
- EXPECT_TRUE(max.is_max());
- EXPECT_EQ(max, TimeDelta::Max());
- EXPECT_GT(max, TimeDelta::FromDays(100 * 365));
- EXPECT_GT(max, TimeDelta());
-}
-
-TEST_F(TimeTest, TimeDeltaMaxConversions) {
- TimeDelta t = TimeDelta::Max();
- EXPECT_EQ(std::numeric_limits<int64>::max(), t.ToInternalValue());
-
- EXPECT_EQ(std::numeric_limits<int>::max(), t.InDays());
- EXPECT_EQ(std::numeric_limits<int>::max(), t.InHours());
- EXPECT_EQ(std::numeric_limits<int>::max(), t.InMinutes());
- EXPECT_EQ(std::numeric_limits<double>::infinity(), t.InSecondsF());
- EXPECT_EQ(std::numeric_limits<int64>::max(), t.InSeconds());
- EXPECT_EQ(std::numeric_limits<double>::infinity(), t.InMillisecondsF());
- EXPECT_EQ(std::numeric_limits<int64>::max(), t.InMilliseconds());
- EXPECT_EQ(std::numeric_limits<int64>::max(), t.InMillisecondsRoundedUp());
-
- t = TimeDelta::FromDays(std::numeric_limits<int>::max());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromHours(std::numeric_limits<int>::max());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromMinutes(std::numeric_limits<int>::max());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromSeconds(std::numeric_limits<int64>::max());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromMilliseconds(std::numeric_limits<int64>::max());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromSecondsD(std::numeric_limits<double>::infinity());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromMillisecondsD(std::numeric_limits<double>::infinity());
- EXPECT_TRUE(t.is_max());
-
- t = TimeDelta::FromMicroseconds(std::numeric_limits<int64>::max());
- EXPECT_TRUE(t.is_max());
-}
-
TEST_F(TimeTest, Max) {
Time max = Time::Max();
EXPECT_TRUE(max.is_max());
@@ -795,6 +749,7 @@ TEST(TimeDelta, FromAndIn) {
EXPECT_EQ(13, TimeDelta::FromMillisecondsD(13.3).InMilliseconds());
EXPECT_EQ(13.3, TimeDelta::FromMillisecondsD(13.3).InMillisecondsF());
EXPECT_EQ(13, TimeDelta::FromMicroseconds(13).InMicroseconds());
+ EXPECT_EQ(3.456, TimeDelta::FromMillisecondsD(3.45678).InMillisecondsF());
}
#if defined(OS_POSIX)
@@ -868,6 +823,83 @@ TEST(TimeDelta, Magnitude) {
TimeDelta::FromMicroseconds(min_int64_plus_two).magnitude());
}
+TEST(TimeDelta, Max) {
+ TimeDelta max = TimeDelta::Max();
+ EXPECT_TRUE(max.is_max());
+ EXPECT_EQ(max, TimeDelta::Max());
+ EXPECT_GT(max, TimeDelta::FromDays(100 * 365));
+ EXPECT_GT(max, TimeDelta());
+}
+
+bool IsMin(TimeDelta delta) {
+ return (-delta).is_max();
+}
+
+TEST(TimeDelta, MaxConversions) {
+ TimeDelta t = TimeDelta::Max();
+ EXPECT_EQ(std::numeric_limits<int64>::max(), t.ToInternalValue());
+
+ EXPECT_EQ(std::numeric_limits<int>::max(), t.InDays());
+ EXPECT_EQ(std::numeric_limits<int>::max(), t.InHours());
+ EXPECT_EQ(std::numeric_limits<int>::max(), t.InMinutes());
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), t.InSecondsF());
+ EXPECT_EQ(std::numeric_limits<int64>::max(), t.InSeconds());
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), t.InMillisecondsF());
+ EXPECT_EQ(std::numeric_limits<int64>::max(), t.InMilliseconds());
+ EXPECT_EQ(std::numeric_limits<int64>::max(), t.InMillisecondsRoundedUp());
+
+ t = TimeDelta::FromDays(std::numeric_limits<int>::max());
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromHours(std::numeric_limits<int>::max());
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromMinutes(std::numeric_limits<int>::max());
+ EXPECT_TRUE(t.is_max());
+
+ int64 max_int = std::numeric_limits<int64>::max();
+
+ t = TimeDelta::FromSeconds(max_int / Time::kMicrosecondsPerSecond + 1);
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromMilliseconds(max_int / Time::kMillisecondsPerSecond + 1);
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromMicroseconds(max_int);
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromSeconds(-max_int / Time::kMicrosecondsPerSecond - 1);
+ EXPECT_TRUE(IsMin(t));
+
+ t = TimeDelta::FromMilliseconds(-max_int / Time::kMillisecondsPerSecond - 1);
+ EXPECT_TRUE(IsMin(t));
+
+ t = TimeDelta::FromMicroseconds(-max_int);
+ EXPECT_TRUE(IsMin(t));
+
+ t = -TimeDelta::FromMicroseconds(std::numeric_limits<int64>::min());
+ EXPECT_FALSE(IsMin(t));
+
+ t = TimeDelta::FromSecondsD(std::numeric_limits<double>::infinity());
+ EXPECT_TRUE(t.is_max());
+
+ double max_d = max_int;
+
+ t = TimeDelta::FromSecondsD(max_d / Time::kMicrosecondsPerSecond + 1);
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromMillisecondsD(std::numeric_limits<double>::infinity());
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromMillisecondsD(max_d / Time::kMillisecondsPerSecond * 2);
+ EXPECT_TRUE(t.is_max());
+
+ t = TimeDelta::FromSecondsD(-max_d / Time::kMicrosecondsPerSecond - 1);
+ EXPECT_TRUE(IsMin(t));
+
+ t = TimeDelta::FromMillisecondsD(-max_d / Time::kMillisecondsPerSecond * 2);
+ EXPECT_TRUE(IsMin(t));
+}
TEST(TimeDelta, NumericOperators) {
double d = 0.5;
@@ -894,7 +926,6 @@ TEST(TimeDelta, NumericOperators) {
EXPECT_EQ(TimeDelta::FromMilliseconds(500),
f * TimeDelta::FromMilliseconds(1000));
-
int i = 2;
EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
TimeDelta::FromMilliseconds(1000) * i);
@@ -919,7 +950,6 @@ TEST(TimeDelta, NumericOperators) {
EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
i64 * TimeDelta::FromMilliseconds(1000));
-
EXPECT_EQ(TimeDelta::FromMilliseconds(500),
TimeDelta::FromMilliseconds(1000) * 0.5);
EXPECT_EQ(TimeDelta::FromMilliseconds(2000),
@@ -943,10 +973,6 @@ TEST(TimeDelta, NumericOperators) {
2 * TimeDelta::FromMilliseconds(1000));
}
-bool IsMin(TimeDelta delta) {
- return (-delta).is_max();
-}
-
TEST(TimeDelta, Overflows) {
// Some sanity checks.
EXPECT_TRUE(TimeDelta::Max().is_max());
diff --git a/chromium/base/time/time_win.cc b/chromium/base/time/time_win.cc
index e9044603e9a..4543680741f 100644
--- a/chromium/base/time/time_win.cc
+++ b/chromium/base/time/time_win.cc
@@ -380,12 +380,10 @@ TimeDelta RolloverProtectedNow() {
using NowFunction = TimeDelta (*)(void);
TimeDelta InitialNowFunction();
-TimeDelta InitialSystemTraceNowFunction();
-// See "threading notes" in InitializeNowFunctionPointers() for details on how
+// See "threading notes" in InitializeNowFunctionPointer() for details on how
// concurrent reads/writes to these globals has been made safe.
NowFunction g_now_function = &InitialNowFunction;
-NowFunction g_system_trace_now_function = &InitialSystemTraceNowFunction;
int64 g_qpc_ticks_per_second = 0;
// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
@@ -395,7 +393,7 @@ int64 g_qpc_ticks_per_second = 0;
TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
// Ensure that the assignment to |g_qpc_ticks_per_second|, made in
- // InitializeNowFunctionPointers(), has happened by this point.
+ // InitializeNowFunctionPointer(), has happened by this point.
ATOMIC_THREAD_FENCE(memory_order_acquire);
DCHECK_GT(g_qpc_ticks_per_second, 0);
@@ -427,37 +425,41 @@ bool IsBuggyAthlon(const base::CPU& cpu) {
return cpu.vendor_name() == "AuthenticAMD" && cpu.family() == 15;
}
-void InitializeNowFunctionPointers() {
+void InitializeNowFunctionPointer() {
LARGE_INTEGER ticks_per_sec = {};
if (!QueryPerformanceFrequency(&ticks_per_sec))
ticks_per_sec.QuadPart = 0;
- // If Windows cannot provide a QPC implementation, both TimeTicks::Now() and
- // TraceTicks::Now() must use the low-resolution clock.
+ // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
+ // the low-resolution clock.
//
// If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
- // will use the low-resolution clock, but TraceTicks::Now() will use the QPC
- // (in the hope that it is still useful for tracing purposes). A CPU lacking a
- // non-stop time counter will cause Windows to provide an alternate QPC
- // implementation that works, but is expensive to use. Certain Athlon CPUs are
- // known to make the QPC implementation unreliable.
+ // will still use the low-resolution clock. A CPU lacking a non-stop time
+ // counter will cause Windows to provide an alternate QPC implementation that
+ // works, but is expensive to use. Certain Athlon CPUs are known to make the
+ // QPC implementation unreliable.
//
- // Otherwise, both Now functions can use the high-resolution QPC clock. As of
- // 4 January 2015, ~68% of users fall within this category.
+ // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
+ // ~72% of users fall within this category.
+ //
+ // TraceTicks::Now() always uses the same clock as TimeTicks::Now(), even
+ // when the QPC exists but is expensive or unreliable. This is because we'd
+ // eventually like to merge TraceTicks and TimeTicks and have one type of
+ // timestamp that is reliable, monotonic, and comparable. Also, while we could
+ // use the high-resolution timer for TraceTicks even when it's unreliable or
+ // slow, it's easier to make tracing tools accommodate a coarse timer than
+ // one that's unreliable or slow.
NowFunction now_function;
- NowFunction system_trace_now_function;
base::CPU cpu;
- if (ticks_per_sec.QuadPart <= 0) {
- now_function = system_trace_now_function = &RolloverProtectedNow;
- } else if (!cpu.has_non_stop_time_stamp_counter() || IsBuggyAthlon(cpu)) {
+ if (ticks_per_sec.QuadPart <= 0 ||
+ !cpu.has_non_stop_time_stamp_counter() || IsBuggyAthlon(cpu)) {
now_function = &RolloverProtectedNow;
- system_trace_now_function = &QPCNow;
} else {
- now_function = system_trace_now_function = &QPCNow;
+ now_function = &QPCNow;
}
// Threading note 1: In an unlikely race condition, it's possible for two or
- // more threads to enter InitializeNowFunctionPointers() in parallel. This is
+ // more threads to enter InitializeNowFunctionPointer() in parallel. This is
// not a problem since all threads should end up writing out the same values
// to the global variables.
//
@@ -468,19 +470,13 @@ void InitializeNowFunctionPointers() {
g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
ATOMIC_THREAD_FENCE(memory_order_release);
g_now_function = now_function;
- g_system_trace_now_function = system_trace_now_function;
}
TimeDelta InitialNowFunction() {
- InitializeNowFunctionPointers();
+ InitializeNowFunctionPointer();
return g_now_function();
}
-TimeDelta InitialSystemTraceNowFunction() {
- InitializeNowFunctionPointers();
- return g_system_trace_now_function();
-}
-
} // namespace
// static
@@ -502,7 +498,7 @@ TimeTicks TimeTicks::Now() {
// static
bool TimeTicks::IsHighResolution() {
if (g_now_function == &InitialNowFunction)
- InitializeNowFunctionPointers();
+ InitializeNowFunctionPointer();
return g_now_function == &QPCNow;
}
@@ -514,7 +510,7 @@ ThreadTicks ThreadTicks::Now() {
// static
TraceTicks TraceTicks::Now() {
- return TraceTicks() + g_system_trace_now_function();
+ return TraceTicks() + g_now_function();
}
// static
diff --git a/chromium/base/timer/timer.h b/chromium/base/timer/timer.h
index 1ef58a3ea0b..c5bd9ced5ae 100644
--- a/chromium/base/timer/timer.h
+++ b/chromium/base/timer/timer.h
@@ -29,7 +29,7 @@
// // This method is called every second to do stuff.
// ...
// }
-// base::RepeatingTimer<MyClass> timer_;
+// base::RepeatingTimer timer_;
// };
//
// Both OneShotTimer and RepeatingTimer also support a Reset method, which
@@ -200,11 +200,8 @@ class BASE_EXPORT Timer {
//-----------------------------------------------------------------------------
// This class is an implementation detail of OneShotTimer and RepeatingTimer.
// Please do not use this class directly.
-template <class Receiver, bool kIsRepeating>
class BaseTimerMethodPointer : public Timer {
public:
- typedef void (Receiver::*ReceiverMethod)();
-
// This is here to work around the fact that Timer::Start is "hidden" by the
// Start definition below, rather than being overloaded.
// TODO(tim): We should remove uses of BaseTimerMethodPointer::Start below
@@ -212,15 +209,18 @@ class BaseTimerMethodPointer : public Timer {
// see bug 148832.
using Timer::Start;
- BaseTimerMethodPointer() : Timer(kIsRepeating, kIsRepeating) {}
+ enum RepeatMode { ONE_SHOT, REPEATING };
+ BaseTimerMethodPointer(RepeatMode mode)
+ : Timer(mode == REPEATING, mode == REPEATING) {}
// Start the timer to run at the given |delay| from now. If the timer is
// already running, it will be replaced to call a task formed from
// |reviewer->*method|.
- virtual void Start(const tracked_objects::Location& posted_from,
- TimeDelta delay,
- Receiver* receiver,
- ReceiverMethod method) {
+ template <class Receiver>
+ void Start(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ Receiver* receiver,
+ void (Receiver::*method)()) {
Timer::Start(posted_from, delay,
base::Bind(method, base::Unretained(receiver)));
}
@@ -228,13 +228,17 @@ class BaseTimerMethodPointer : public Timer {
//-----------------------------------------------------------------------------
// A simple, one-shot timer. See usage notes at the top of the file.
-template <class Receiver>
-class OneShotTimer : public BaseTimerMethodPointer<Receiver, false> {};
+class OneShotTimer : public BaseTimerMethodPointer {
+ public:
+ OneShotTimer() : BaseTimerMethodPointer(ONE_SHOT) {}
+};
//-----------------------------------------------------------------------------
// A simple, repeating timer. See usage notes at the top of the file.
-template <class Receiver>
-class RepeatingTimer : public BaseTimerMethodPointer<Receiver, true> {};
+class RepeatingTimer : public BaseTimerMethodPointer {
+ public:
+ RepeatingTimer() : BaseTimerMethodPointer(REPEATING) {}
+};
//-----------------------------------------------------------------------------
// A Delay timer is like The Button from Lost. Once started, you have to keep
@@ -247,22 +251,29 @@ class RepeatingTimer : public BaseTimerMethodPointer<Receiver, true> {};
//
// If destroyed, the timeout is canceled and will not occur even if already
// inflight.
-template <class Receiver>
class DelayTimer : protected Timer {
public:
- typedef void (Receiver::*ReceiverMethod)();
-
+ template <class Receiver>
DelayTimer(const tracked_objects::Location& posted_from,
TimeDelta delay,
Receiver* receiver,
- ReceiverMethod method)
- : Timer(posted_from, delay,
+ void (Receiver::*method)())
+ : Timer(posted_from,
+ delay,
base::Bind(method, base::Unretained(receiver)),
false) {}
- void Reset() override { Timer::Reset(); }
+ void Reset() override;
};
+// This class has a templated method so it can not be exported without failing
+// to link in MSVC. But clang-plugin does not allow inline definitions of
+// virtual methods, so the inline definition lives in the header file here
+// to satisfy both.
+inline void DelayTimer::Reset() {
+ Timer::Reset();
+}
+
} // namespace base
#endif // BASE_TIMER_TIMER_H_
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index 7213b809b7b..35e4315ea46 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -51,17 +51,15 @@ class OneShotTimerTester {
}
bool* did_run_;
- base::OneShotTimer<OneShotTimerTester> timer_;
+ base::OneShotTimer timer_;
const unsigned delay_ms_;
bool quit_message_loop_;
};
class OneShotSelfDeletingTimerTester {
public:
- explicit OneShotSelfDeletingTimerTester(bool* did_run) :
- did_run_(did_run),
- timer_(new base::OneShotTimer<OneShotSelfDeletingTimerTester>()) {
- }
+ explicit OneShotSelfDeletingTimerTester(bool* did_run)
+ : did_run_(did_run), timer_(new base::OneShotTimer()) {}
void Start() {
timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(10), this,
@@ -76,7 +74,7 @@ class OneShotSelfDeletingTimerTester {
}
bool* did_run_;
- scoped_ptr<base::OneShotTimer<OneShotSelfDeletingTimerTester> > timer_;
+ scoped_ptr<base::OneShotTimer> timer_;
};
class RepeatingTimerTester {
@@ -101,7 +99,7 @@ class RepeatingTimerTester {
bool* did_run_;
int counter_;
TimeDelta delay_;
- base::RepeatingTimer<RepeatingTimerTester> timer_;
+ base::RepeatingTimer timer_;
};
void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
@@ -205,8 +203,8 @@ void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
// If Delay is never called, the timer shouldn't go off.
DelayTimerTarget target;
- base::DelayTimer<DelayTimerTarget> timer(FROM_HERE,
- TimeDelta::FromMilliseconds(1), &target, &DelayTimerTarget::Signal);
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
bool did_run = false;
OneShotTimerTester tester(&did_run);
@@ -220,8 +218,8 @@ void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
base::MessageLoop loop(message_loop_type);
DelayTimerTarget target;
- base::DelayTimer<DelayTimerTarget> timer(FROM_HERE,
- TimeDelta::FromMilliseconds(1), &target, &DelayTimerTarget::Signal);
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
timer.Reset();
bool did_run = false;
@@ -233,11 +231,8 @@ void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
}
struct ResetHelper {
- ResetHelper(base::DelayTimer<DelayTimerTarget>* timer,
- DelayTimerTarget* target)
- : timer_(timer),
- target_(target) {
- }
+ ResetHelper(base::DelayTimer* timer, DelayTimerTarget* target)
+ : timer_(timer), target_(target) {}
void Reset() {
ASSERT_FALSE(target_->signaled());
@@ -245,8 +240,8 @@ struct ResetHelper {
}
private:
- base::DelayTimer<DelayTimerTarget> *const timer_;
- DelayTimerTarget *const target_;
+ base::DelayTimer* const timer_;
+ DelayTimerTarget* const target_;
};
void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
@@ -254,13 +249,13 @@ void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
// If Delay is never called, the timer shouldn't go off.
DelayTimerTarget target;
- base::DelayTimer<DelayTimerTarget> timer(FROM_HERE,
- TimeDelta::FromMilliseconds(50), &target, &DelayTimerTarget::Signal);
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerTarget::Signal);
timer.Reset();
ResetHelper reset_helper(&timer, &target);
- base::OneShotTimer<ResetHelper> timers[20];
+ base::OneShotTimer timers[20];
for (size_t i = 0; i < arraysize(timers); ++i) {
timers[i].Start(FROM_HERE, TimeDelta::FromMilliseconds(i * 10),
&reset_helper, &ResetHelper::Reset);
@@ -288,9 +283,8 @@ void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
DelayTimerFatalTarget target;
{
- base::DelayTimer<DelayTimerFatalTarget> timer(
- FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
- &DelayTimerFatalTarget::Signal);
+ base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerFatalTarget::Signal);
timer.Reset();
}
diff --git a/chromium/base/tools_sanity_unittest.cc b/chromium/base/tools_sanity_unittest.cc
index c0541d139fb..4340fcd9ba5 100644
--- a/chromium/base/tools_sanity_unittest.cc
+++ b/chromium/base/tools_sanity_unittest.cc
@@ -339,4 +339,19 @@ TEST(ToolsSanityTest, AtomicsAreIgnored) {
EXPECT_EQ(kMagicValue, shared);
}
+#if defined(CFI_ENFORCEMENT)
+TEST(ToolsSanityTest, BadCast) {
+ class A {
+ virtual void f() {}
+ };
+
+ class B {
+ virtual void f() {}
+ };
+
+ A a;
+ EXPECT_DEATH((void)(B*)&a, "ILL_ILLOPN");
+}
+#endif
+
} // namespace base
diff --git a/chromium/base/trace_event/BUILD.gn b/chromium/base/trace_event/BUILD.gn
index 8f987479da4..a55c55d75ce 100644
--- a/chromium/base/trace_event/BUILD.gn
+++ b/chromium/base/trace_event/BUILD.gn
@@ -17,6 +17,8 @@ source_set("trace_event") {
"memory_dump_request_args.h",
"memory_dump_session_state.cc",
"memory_dump_session_state.h",
+ "memory_profiler_allocation_context.cc",
+ "memory_profiler_allocation_context.h",
"process_memory_dump.cc",
"process_memory_dump.h",
"process_memory_maps.cc",
@@ -27,17 +29,19 @@ source_set("trace_event") {
"process_memory_totals.h",
"process_memory_totals_dump_provider.cc",
"process_memory_totals_dump_provider.h",
+ "trace_buffer.cc",
+ "trace_buffer.h",
"trace_config.cc",
"trace_config.h",
"trace_event.h",
"trace_event_android.cc",
"trace_event_argument.cc",
"trace_event_argument.h",
+ "trace_event_common.h",
"trace_event_etw_export_win.cc",
"trace_event_etw_export_win.h",
"trace_event_impl.cc",
"trace_event_impl.h",
- "trace_event_impl_constants.cc",
"trace_event_memory.cc",
"trace_event_memory.h",
"trace_event_memory_overhead.cc",
@@ -48,6 +52,11 @@ source_set("trace_event") {
"trace_event_system_stats_monitor.h",
"trace_event_win.cc",
"trace_event_win.h",
+ "trace_log.cc",
+ "trace_log.h",
+ "trace_log_constants.cc",
+ "trace_sampling_thread.cc",
+ "trace_sampling_thread.h",
"winheap_dump_provider_win.cc",
"winheap_dump_provider_win.h",
]
@@ -95,9 +104,11 @@ source_set("trace_event_unittests") {
"java_heap_dump_provider_android_unittest.cc",
"memory_allocator_dump_unittest.cc",
"memory_dump_manager_unittest.cc",
+ "memory_profiler_allocation_context_unittest.cc",
"process_memory_dump_unittest.cc",
"process_memory_maps_dump_provider_unittest.cc",
"process_memory_totals_dump_provider_unittest.cc",
+ "trace_config_memory_test_util.h",
"trace_config_unittest.cc",
"trace_event_argument_unittest.cc",
"trace_event_memory_unittest.cc",
diff --git a/chromium/base/trace_event/OWNERS b/chromium/base/trace_event/OWNERS
index aa1d675f75a..5136401580d 100644
--- a/chromium/base/trace_event/OWNERS
+++ b/chromium/base/trace_event/OWNERS
@@ -1,4 +1,5 @@
nduca@chromium.org
dsinclair@chromium.org
primiano@chromium.org
+simonhatch@chromium.org
per-file trace_event_android.cc=wangxianzhu@chromium.org
diff --git a/chromium/base/trace_event/java_heap_dump_provider_android.cc b/chromium/base/trace_event/java_heap_dump_provider_android.cc
index 4f5986d92ce..684f7301cfb 100644
--- a/chromium/base/trace_event/java_heap_dump_provider_android.cc
+++ b/chromium/base/trace_event/java_heap_dump_provider_android.cc
@@ -24,7 +24,8 @@ JavaHeapDumpProvider::~JavaHeapDumpProvider() {
// Called at trace dump point time. Creates a snapshot with the memory counters
// for the current process.
-bool JavaHeapDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
+bool JavaHeapDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
// These numbers come from java.lang.Runtime stats.
long total_heap_size = 0;
long free_heap_size = 0;
diff --git a/chromium/base/trace_event/java_heap_dump_provider_android.h b/chromium/base/trace_event/java_heap_dump_provider_android.h
index 2f31047b50b..e69c28102ad 100644
--- a/chromium/base/trace_event/java_heap_dump_provider_android.h
+++ b/chromium/base/trace_event/java_heap_dump_provider_android.h
@@ -17,7 +17,8 @@ class BASE_EXPORT JavaHeapDumpProvider : public MemoryDumpProvider {
static JavaHeapDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
private:
friend struct DefaultSingletonTraits<JavaHeapDumpProvider>;
diff --git a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
index bbefba518d0..35f3f17fbb1 100644
--- a/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
+++ b/chromium/base/trace_event/java_heap_dump_provider_android_unittest.cc
@@ -13,8 +13,9 @@ namespace trace_event {
TEST(JavaHeapDumpProviderTest, JavaHeapDump) {
auto jhdp = JavaHeapDumpProvider::GetInstance();
scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- jhdp->OnMemoryDump(pmd.get());
+ jhdp->OnMemoryDump(dump_args, pmd.get());
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index 3e593b02897..28aa140d670 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -28,7 +28,8 @@ MallocDumpProvider::~MallocDumpProvider() {
// Called at trace dump point time. Creates a snapshot the memory counters for
// the current process.
-bool MallocDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
+bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
struct mallinfo info = mallinfo();
DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
@@ -38,7 +39,7 @@ bool MallocDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
// dlmalloc the total is given by |arena| + |hblkhd|.
// For more details see link: http://goo.gl/fMR8lF.
MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
- outer_dump->AddScalar("heap_virtual_size",
+ outer_dump->AddScalar("virtual_size",
MemoryAllocatorDump::kUnitsBytes,
info.arena + info.hblkhd);
diff --git a/chromium/base/trace_event/malloc_dump_provider.h b/chromium/base/trace_event/malloc_dump_provider.h
index 0a51ff75daa..f35199937ea 100644
--- a/chromium/base/trace_event/malloc_dump_provider.h
+++ b/chromium/base/trace_event/malloc_dump_provider.h
@@ -23,7 +23,8 @@ class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
static MallocDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
private:
friend struct DefaultSingletonTraits<MallocDumpProvider>;
diff --git a/chromium/base/trace_event/memory_allocator_dump.cc b/chromium/base/trace_event/memory_allocator_dump.cc
index 4037f946c9e..76d53eb4762 100644
--- a/chromium/base/trace_event/memory_allocator_dump.cc
+++ b/chromium/base/trace_event/memory_allocator_dump.cc
@@ -16,7 +16,7 @@ namespace base {
namespace trace_event {
const char MemoryAllocatorDump::kNameSize[] = "size";
-const char MemoryAllocatorDump::kNameObjectsCount[] = "objects_count";
+const char MemoryAllocatorDump::kNameObjectCount[] = "object_count";
const char MemoryAllocatorDump::kTypeScalar[] = "scalar";
const char MemoryAllocatorDump::kTypeString[] = "string";
const char MemoryAllocatorDump::kUnitsBytes[] = "bytes";
diff --git a/chromium/base/trace_event/memory_allocator_dump.h b/chromium/base/trace_event/memory_allocator_dump.h
index 2ded1733c3f..6ad3d64110e 100644
--- a/chromium/base/trace_event/memory_allocator_dump.h
+++ b/chromium/base/trace_event/memory_allocator_dump.h
@@ -34,7 +34,7 @@ class BASE_EXPORT MemoryAllocatorDump {
// Standard attribute |name|s for the AddScalar and AddString() methods.
static const char kNameSize[]; // To represent allocated space.
- static const char kNameObjectsCount[]; // To represent number of objects.
+ static const char kNameObjectCount[]; // To represent number of objects.
// Standard attribute |unit|s for the AddScalar and AddString() methods.
static const char kUnitsBytes[]; // Unit name to represent bytes.
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index 85b98d65511..f787e64f067 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -21,13 +21,14 @@ namespace {
class FakeMemoryAllocatorDumpProvider : public MemoryDumpProvider {
public:
- bool OnMemoryDump(ProcessMemoryDump* pmd) override {
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override {
MemoryAllocatorDump* root_heap =
pmd->CreateAllocatorDump("foobar_allocator");
root_heap->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 4096);
- root_heap->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ root_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 42);
root_heap->AddScalar("attr1", "units1", 1234);
root_heap->AddString("attr2", "units2", "string_value");
@@ -37,7 +38,7 @@ class FakeMemoryAllocatorDumpProvider : public MemoryDumpProvider {
pmd->CreateAllocatorDump("foobar_allocator/sub_heap");
sub_heap->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 1);
- sub_heap->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ sub_heap->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 3);
pmd->CreateAllocatorDump("foobar_allocator/sub_heap/empty");
@@ -125,8 +126,9 @@ TEST(MemoryAllocatorDumpTest, GuidGeneration) {
TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
FakeMemoryAllocatorDumpProvider fmadp;
ProcessMemoryDump pmd(make_scoped_refptr(new MemoryDumpSessionState()));
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
- fmadp.OnMemoryDump(&pmd);
+ fmadp.OnMemoryDump(dump_args, &pmd);
ASSERT_EQ(3u, pmd.allocator_dumps().size());
@@ -136,7 +138,7 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
EXPECT_EQ("foobar_allocator", root_heap->absolute_name());
CheckScalar(root_heap, MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 4096);
- CheckScalar(root_heap, MemoryAllocatorDump::kNameObjectsCount,
+ CheckScalar(root_heap, MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 42);
CheckScalar(root_heap, "attr1", "units1", 1234);
CheckString(root_heap, "attr2", MemoryAllocatorDump::kTypeString, "units2",
@@ -149,7 +151,7 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
EXPECT_EQ("foobar_allocator/sub_heap", sub_heap->absolute_name());
CheckScalar(sub_heap, MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, 1);
- CheckScalar(sub_heap, MemoryAllocatorDump::kNameObjectsCount,
+ CheckScalar(sub_heap, MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 3);
const MemoryAllocatorDump* empty_sub_heap =
pmd.GetAllocatorDump("foobar_allocator/sub_heap/empty");
@@ -159,7 +161,7 @@ TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
DictionaryValue* attrs = nullptr;
ASSERT_TRUE(raw_attrs->GetAsDictionary(&attrs));
ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameSize));
- ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectsCount));
+ ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectCount));
// Check that the AsValueInfo doesn't hit any DCHECK.
scoped_refptr<TracedValue> traced_value(new TracedValue());
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 5588bf29796..a2fa9539636 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -9,7 +9,6 @@
#include "base/atomic_sequence_num.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
-#include "base/hash.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
@@ -39,42 +38,54 @@ namespace trace_event {
namespace {
-// TODO(primiano): this should be smarter and should do something similar to
-// trace event synthetic delays.
-const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra");
-
-// Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps.
-const int kHeavyMmapsDumpsRate = 8; // 250 ms * 8 = 2000 ms.
-const int kDumpIntervalMs = 250;
const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
uint32 g_periodic_dumps_count = 0;
+uint32 g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;
-MemoryDumpProvider* g_mmaps_dump_provider = nullptr;
void RequestPeriodicGlobalDump() {
- MemoryDumpType dump_type = g_periodic_dumps_count == 0
- ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS
- : MemoryDumpType::PERIODIC_INTERVAL;
- if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate)
- g_periodic_dumps_count = 0;
+ MemoryDumpLevelOfDetail level_of_detail;
+ if (g_heavy_dumps_rate == 0) {
+ level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+ } else {
+ level_of_detail = g_periodic_dumps_count == 0
+ ? MemoryDumpLevelOfDetail::DETAILED
+ : MemoryDumpLevelOfDetail::LIGHT;
+
+ if (++g_periodic_dumps_count == g_heavy_dumps_rate)
+ g_periodic_dumps_count = 0;
+ }
- MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type);
+ MemoryDumpManager::GetInstance()->RequestGlobalDump(
+ MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}
} // namespace
// static
-const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory;
+const char* const MemoryDumpManager::kTraceCategory =
+ TRACE_DISABLED_BY_DEFAULT("memory-infra");
+
+// static
+const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
// static
const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0;
// static
-const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
+const char* const MemoryDumpManager::kSystemAllocatorPoolName =
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+ MallocDumpProvider::kAllocatedObjects;
+#elif defined(OS_WIN)
+ WinHeapDumpProvider::kAllocatedObjects;
+#else
+ nullptr;
+#endif
+
// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
@@ -93,8 +104,8 @@ void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
}
MemoryDumpManager::MemoryDumpManager()
- : did_unregister_dump_provider_(false),
- delegate_(nullptr),
+ : delegate_(nullptr),
+ is_coordinator_(false),
memory_tracing_enabled_(0),
tracing_process_id_(kInvalidTracingProcessId),
skip_core_dumpers_auto_registration_for_testing_(false) {
@@ -102,48 +113,46 @@ MemoryDumpManager::MemoryDumpManager()
}
MemoryDumpManager::~MemoryDumpManager() {
- base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}
-void MemoryDumpManager::Initialize() {
- TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
- trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this);
-
- if (skip_core_dumpers_auto_registration_for_testing_)
- return;
+void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
+ bool is_coordinator) {
+ {
+ AutoLock lock(lock_);
+ DCHECK(delegate);
+ DCHECK(!delegate_);
+ delegate_ = delegate;
+ is_coordinator_ = is_coordinator;
+ }
// Enable the core dump providers.
+ if (!skip_core_dumpers_auto_registration_for_testing_) {
#if !defined(OS_NACL)
- RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
+ RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
- g_mmaps_dump_provider = ProcessMemoryMapsDumpProvider::GetInstance();
-
- // The memory maps dump provider is currently disabled for security reasons
- // and will be enabled once tracing is more secure (crbug.com/517906).
- // It is still enabled for running benchmarks.
- if (CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
- RegisterDumpProvider(g_mmaps_dump_provider);
- }
-
- RegisterDumpProvider(MallocDumpProvider::GetInstance());
+ RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
+ RegisterDumpProvider(MallocDumpProvider::GetInstance());
#endif
#if defined(OS_ANDROID)
- RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
+ RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
#endif
#if defined(OS_WIN)
- RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
+ RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
#endif
-}
+ } // !skip_core_dumpers_auto_registration_for_testing_
-void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) {
- AutoLock lock(lock_);
- DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_);
- delegate_ = delegate;
+ // If tracing was enabled before initializing MemoryDumpManager, we missed the
+ // OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+ bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
+ TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
+ TraceLog::GetInstance()->AddEnabledStateObserver(this);
+ if (is_tracing_already_enabled)
+ OnTraceLogEnabled();
}
void MemoryDumpManager::RegisterDumpProvider(
@@ -151,7 +160,16 @@ void MemoryDumpManager::RegisterDumpProvider(
const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
MemoryDumpProviderInfo mdp_info(mdp, task_runner);
AutoLock lock(lock_);
- dump_providers_.insert(mdp_info);
+ auto iter_new = dump_providers_.insert(mdp_info);
+
+ // If there was a previous entry, replace it with the new one. This is to deal
+ // with the case where a dump provider unregisters itself and then re-
+ // registers before a memory dump happens, so its entry was still in the
+ // collection but flagged |unregistered|.
+ if (!iter_new.second) {
+ dump_providers_.erase(iter_new.first);
+ dump_providers_.insert(mdp_info);
+ }
}
void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
@@ -182,40 +200,44 @@ void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
<< "The MemoryDumpProvider attempted to unregister itself in a racy way. "
<< "Please file a crbug.";
- dump_providers_.erase(mdp_iter);
- did_unregister_dump_provider_ = true;
+ mdp_iter->unregistered = true;
}
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail,
const MemoryDumpCallback& callback) {
// Bail out immediately if tracing is not enabled at all.
- if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)))
+ if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
+ if (!callback.is_null())
+ callback.Run(0u /* guid */, false /* success */);
return;
+ }
const uint64 guid =
TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());
- // The delegate_ is supposed to be thread safe, immutable and long lived.
- // No need to keep the lock after we ensure that a delegate has been set.
+ // Technically there is no need to grab the |lock_| here as the delegate is
+ // long-lived and can only be set by Initialize(), which is locked and
+ // necessarily happens before memory_tracing_enabled_ == true.
+ // Not taking the |lock_|, though, is likely to make TSan barf and, at this point
+ // (memory-infra is enabled) we're not in the fast-path anymore.
MemoryDumpManagerDelegate* delegate;
{
AutoLock lock(lock_);
delegate = delegate_;
}
- if (delegate) {
- // The delegate is in charge to coordinate the request among all the
- // processes and call the CreateLocalDumpPoint on the local process.
- MemoryDumpRequestArgs args = {guid, dump_type};
- delegate->RequestGlobalMemoryDump(args, callback);
- } else if (!callback.is_null()) {
- callback.Run(guid, false /* success */);
- }
+ // The delegate will coordinate the IPC broadcast and at some point invoke
+ // CreateProcessDump() to get a dump for the current process.
+ MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
+ delegate->RequestGlobalMemoryDump(args, callback);
}
-void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) {
- RequestGlobalDump(dump_type, MemoryDumpCallback());
+void MemoryDumpManager::RequestGlobalDump(
+ MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail) {
+ RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
}
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
@@ -223,7 +245,6 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
{
AutoLock lock(lock_);
- did_unregister_dump_provider_ = false;
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_.begin(), session_state_, callback));
}
@@ -263,25 +284,10 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
bool skip_dump = false;
{
AutoLock lock(lock_);
- // In the unlikely event that a dump provider was unregistered while
- // dumping, abort the dump, as that would make |next_dump_provider| invalid.
- // Registration, on the other hand, is safe as per std::set<> contract.
- if (did_unregister_dump_provider_) {
- return AbortDumpLocked(pmd_async_state->callback,
- pmd_async_state->task_runner,
- pmd_async_state->req_args.dump_guid);
- }
- auto* mdp_info = &*pmd_async_state->next_dump_provider;
+ auto mdp_info = pmd_async_state->next_dump_provider;
mdp = mdp_info->dump_provider;
- if (mdp_info->disabled) {
- skip_dump = true;
- } else if (mdp == g_mmaps_dump_provider &&
- pmd_async_state->req_args.dump_type !=
- MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS) {
- // Mmaps dumping is very heavyweight and cannot be performed at the same
- // rate of other dumps. TODO(primiano): this is a hack and should be
- // cleaned up as part of crbug.com/499731.
+ if (mdp_info->disabled || mdp_info->unregistered) {
skip_dump = true;
} else if (mdp_info->task_runner &&
!mdp_info->task_runner->BelongsToCurrentThread()) {
@@ -312,17 +318,16 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
// Invoke the dump provider without holding the |lock_|.
bool finalize = false;
bool dump_successful = false;
- if (!skip_dump)
- dump_successful = mdp->OnMemoryDump(&pmd_async_state->process_memory_dump);
+
+ if (!skip_dump) {
+ MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+ dump_successful =
+ mdp->OnMemoryDump(args, &pmd_async_state->process_memory_dump);
+ }
{
AutoLock lock(lock_);
- if (did_unregister_dump_provider_) {
- return AbortDumpLocked(pmd_async_state->callback,
- pmd_async_state->task_runner,
- pmd_async_state->req_args.dump_guid);
- }
- auto* mdp_info = &*pmd_async_state->next_dump_provider;
+ auto mdp_info = pmd_async_state->next_dump_provider;
if (dump_successful) {
mdp_info->consecutive_failures = 0;
} else if (!skip_dump) {
@@ -333,6 +338,9 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
}
++pmd_async_state->next_dump_provider;
finalize = pmd_async_state->next_dump_provider == dump_providers_.end();
+
+ if (mdp_info->unregistered)
+ dump_providers_.erase(mdp_info);
}
if (!skip_dump && !dump_successful) {
@@ -359,9 +367,12 @@ void MemoryDumpManager::FinalizeDumpAndAddToTrace(
return;
}
- scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
- pmd_async_state->process_memory_dump.AsValueInto(
- static_cast<TracedValue*>(event_value.get()));
+ TracedValue* traced_value = new TracedValue();
+ scoped_refptr<ConvertableToTraceFormat> event_value(traced_value);
+ pmd_async_state->process_memory_dump.AsValueInto(traced_value);
+ traced_value->SetString("level_of_detail",
+ MemoryDumpLevelOfDetailToString(
+ pmd_async_state->req_args.level_of_detail));
const char* const event_name =
MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
@@ -394,11 +405,10 @@ void MemoryDumpManager::AbortDumpLocked(
}
void MemoryDumpManager::OnTraceLogEnabled() {
- // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter
- // to figure out (and cache) which dumpers should be enabled or not.
- // For the moment piggy back everything on the generic "memory" category.
bool enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
+ if (!enabled)
+ return;
// Initialize the TraceLog for the current thread. This is to avoid that the
// TraceLog memory dump provider is registered lazily in the PostTask() below
@@ -407,13 +417,7 @@ void MemoryDumpManager::OnTraceLogEnabled() {
AutoLock lock(lock_);
- // There is no point starting the tracing without a delegate.
- if (!enabled || !delegate_) {
- // Disable all the providers.
- for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it)
- it->disabled = true;
- return;
- }
+ DCHECK(delegate_); // At this point we must have a delegate.
session_state_ = new MemoryDumpSessionState();
for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
@@ -423,12 +427,43 @@ void MemoryDumpManager::OnTraceLogEnabled() {
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
- if (delegate_->IsCoordinatorProcess()) {
- g_periodic_dumps_count = 0;
- periodic_dump_timer_.Start(FROM_HERE,
- TimeDelta::FromMilliseconds(kDumpIntervalMs),
- base::Bind(&RequestPeriodicGlobalDump));
+ // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+ // when running memory benchmarks until telemetry uses TraceConfig to
+ // enable/disable periodic dumps. See crbug.com/529184 .
+ if (!is_coordinator_ ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ "enable-memory-benchmarking")) {
+ return;
}
+
+ // Enable periodic dumps. At the moment the periodic support is limited to at
+ // most one low-detail periodic dump and at most one high-detail periodic
+ // dump. If both are specified the high-detail period must be an integer
+ // multiple of the low-level one.
+ g_periodic_dumps_count = 0;
+ const TraceConfig trace_config =
+ TraceLog::GetInstance()->GetCurrentTraceConfig();
+ const TraceConfig::MemoryDumpConfig& config_list =
+ trace_config.memory_dump_config();
+ if (config_list.empty())
+ return;
+
+ uint32 min_timer_period_ms = std::numeric_limits<uint32>::max();
+ uint32 heavy_dump_period_ms = 0;
+ DCHECK_LE(config_list.size(), 2u);
+ for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
+ DCHECK(config.periodic_interval_ms);
+ if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
+ heavy_dump_period_ms = config.periodic_interval_ms;
+ min_timer_period_ms =
+ std::min(min_timer_period_ms, config.periodic_interval_ms);
+ }
+ DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+ g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
+
+ periodic_dump_timer_.Start(FROM_HERE,
+ TimeDelta::FromMilliseconds(min_timer_period_ms),
+ base::Bind(&RequestPeriodicGlobalDump));
}
void MemoryDumpManager::OnTraceLogDisabled() {
@@ -438,13 +473,8 @@ void MemoryDumpManager::OnTraceLogDisabled() {
session_state_ = nullptr;
}
-// static
-uint64 MemoryDumpManager::ChildProcessIdToTracingProcessId(
- int child_process_id) {
- return static_cast<uint64>(
- Hash(reinterpret_cast<const char*>(&child_process_id),
- sizeof(child_process_id))) +
- 1;
+uint64 MemoryDumpManager::GetTracingProcessId() const {
+ return delegate_->GetTracingProcessId();
}
MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
@@ -453,8 +483,8 @@ MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
: dump_provider(dump_provider),
task_runner(task_runner),
consecutive_failures(0),
- disabled(false) {
-}
+ disabled(false),
+ unregistered(false) {}
MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
}
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 92c5cb867db..0f352ed7797 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -32,17 +32,27 @@ class MemoryDumpSessionState;
// RequestDumpPoint(). The extension by Un(RegisterDumpProvider).
class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
public:
+ static const char* const kTraceCategory;
+
+ // This value is returned as the tracing id of the child processes by
+ // GetTracingProcessId() when tracing is not enabled.
static const uint64 kInvalidTracingProcessId;
- static const char* const kTraceCategoryForTesting;
static MemoryDumpManager* GetInstance();
- // Invoked once per process to register the TraceLog observer.
- void Initialize();
-
- // See the lifetime and thread-safety requirements on the delegate below in
- // the |MemoryDumpManagerDelegate| docstring.
- void SetDelegate(MemoryDumpManagerDelegate* delegate);
+ // Invoked once per process to listen to trace begin / end events.
+ // Initialization can happen after (Un)RegisterMemoryDumpProvider() calls
+ // and the MemoryDumpManager guarantees to support this.
+ // On the other side, the MemoryDumpManager will not be fully operational
+ // (i.e. will NACK any RequestGlobalMemoryDump()) until initialized.
+ // Arguments:
+ // is_coordinator: if true this MemoryDumpManager instance will act as a
+ // coordinator and schedule periodic dumps (if enabled via TraceConfig);
+ // false when the MemoryDumpManager is initialized in a slave process.
+ // delegate: inversion-of-control interface for embedder-specific behaviors
+ // (multiprocess handshaking). See the lifetime and thread-safety
+ // requirements in the |MemoryDumpManagerDelegate| docstring.
+ void Initialize(MemoryDumpManagerDelegate* delegate, bool is_coordinator);
// MemoryDumpManager does NOT take memory ownership of |mdp|, which is
// expected to either be a singleton or unregister itself.
@@ -62,10 +72,12 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// processes have dumped) and its success (true iff all the dumps were
// successful).
void RequestGlobalDump(MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail,
const MemoryDumpCallback& callback);
// Same as above (still asynchronous), but without callback.
- void RequestGlobalDump(MemoryDumpType dump_type);
+ void RequestGlobalDump(MemoryDumpType dump_type,
+ MemoryDumpLevelOfDetail level_of_detail);
// TraceLog::EnabledStateObserver implementation.
void OnTraceLogEnabled() override;
@@ -78,28 +90,25 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
return session_state_;
}
- // Derives a tracing process id from a child process id. Child process ids
- // cannot be used directly in tracing for security reasons (see: discussion in
- // crrev.com/1173263004). This method is meant to be used when dumping
- // cross-process shared memory from a process which knows the child process id
- // of its endpoints. The value returned by this method is guaranteed to be
- // equal to the value returned by tracing_process_id() in the corresponding
- // child process.
- // This will never return kInvalidTracingProcessId.
- static uint64 ChildProcessIdToTracingProcessId(int child_id);
-
- // Returns a unique id for the current process. The id can be retrieved only
- // by child processes and only when tracing is enabled. This is intended to
- // express cross-process sharing of memory dumps on the child-process side,
- // without having to know its own child process id.
- uint64 tracing_process_id() const { return tracing_process_id_; }
+ // Returns a unique id for identifying the processes. The id can be
+ // retrieved by child processes only when tracing is enabled. This is
+ // intended to express cross-process sharing of memory dumps on the
+ // child-process side, without having to know its own child process id.
+ uint64 GetTracingProcessId() const;
+
+ // Returns the name for the allocated_objects dump. Use this to declare
+ // suballocator dumps from other dump providers.
+ // It will return nullptr if there is no dump provider for the system
+ // allocator registered (which is currently the case for Mac OS).
+ const char* system_allocator_pool_name() const {
+ return kSystemAllocatorPoolName;
+ };
private:
friend struct DefaultDeleter<MemoryDumpManager>; // For the testing instance.
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
- FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, DisableFailingDumpers);
// Descriptor struct used to hold information about registered MDPs. It is
// deliberately copyable, in order to allow it to be used as std::set value.
@@ -120,6 +129,11 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// as can be safely changed without impacting the order within the set.
mutable int consecutive_failures;
mutable bool disabled;
+
+ // When a dump provider unregisters, it is flagged as |unregistered| and it
+ // is removed only upon the next memory dump. This is to avoid altering the
+ // |dump_providers_| collection while a dump is in progress.
+ mutable bool unregistered;
};
using MemoryDumpProviderInfoSet = std::set<MemoryDumpProviderInfo>;
@@ -159,9 +173,10 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
};
static const int kMaxConsecutiveFailuresCount;
+ static const char* const kSystemAllocatorPoolName;
MemoryDumpManager();
- virtual ~MemoryDumpManager();
+ ~MemoryDumpManager() override;
static void SetInstanceForTesting(MemoryDumpManager* instance);
static void FinalizeDumpAndAddToTrace(
@@ -182,26 +197,18 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void ContinueAsyncProcessDump(
scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
- // Pass kInvalidTracingProcessId to invalidate the id.
- void set_tracing_process_id(uint64 id) {
- DCHECK(tracing_process_id_ == kInvalidTracingProcessId ||
- id == kInvalidTracingProcessId || tracing_process_id_ == id);
- tracing_process_id_ = id;
- }
-
// An ordererd set of registered MemoryDumpProviderInfo(s), sorted by thread
// affinity (MDPs belonging to the same thread are adjacent).
MemoryDumpProviderInfoSet dump_providers_;
- // Flag used to signal that some provider was removed from |dump_providers_|
- // and therefore the current memory dump (if any) should be aborted.
- bool did_unregister_dump_provider_;
-
// Shared among all the PMDs to keep state scoped to the tracing session.
scoped_refptr<MemoryDumpSessionState> session_state_;
MemoryDumpManagerDelegate* delegate_; // Not owned.
+ // When true, this instance is in charge of coordinating periodic dumps.
+ bool is_coordinator_;
+
// Protects from concurrent accesses to the |dump_providers_*| and |delegate_|
// to guard against disabling logging while dumping on another thread.
Lock lock_;
@@ -211,7 +218,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
subtle::AtomicWord memory_tracing_enabled_;
// For time-triggered periodic dumps.
- RepeatingTimer<MemoryDumpManager> periodic_dump_timer_;
+ RepeatingTimer periodic_dump_timer_;
// The unique id of the child process. This is created only for tracing and is
// expected to be valid only when tracing is enabled.
@@ -230,9 +237,9 @@ class BASE_EXPORT MemoryDumpManagerDelegate {
virtual void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback) = 0;
- // Determines whether the MemoryDumpManager instance should be the master
- // (the ones which initiates and coordinates the multiprocess dumps) or not.
- virtual bool IsCoordinatorProcess() const = 0;
+ // Returns tracing process id of the current process. This is used by
+ // MemoryDumpManager::GetTracingProcessId.
+ virtual uint64 GetTracingProcessId() const = 0;
protected:
MemoryDumpManagerDelegate() {}
@@ -243,10 +250,6 @@ class BASE_EXPORT MemoryDumpManagerDelegate {
MemoryDumpManager::GetInstance()->CreateProcessDump(args, callback);
}
- void set_tracing_process_id(uint64 id) {
- MemoryDumpManager::GetInstance()->set_tracing_process_id(id);
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(MemoryDumpManagerDelegate);
};
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 53c7d9250da..af3287ddfde 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -8,14 +8,19 @@
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/test_io_thread.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using testing::_;
+using testing::AnyNumber;
+using testing::AtMost;
using testing::Between;
using testing::Invoke;
using testing::Return;
@@ -23,32 +28,56 @@ using testing::Return;
namespace base {
namespace trace_event {
-// Testing MemoryDumpManagerDelegate which short-circuits dump requests locally
-// instead of performing IPC dances.
+// GTest matchers for MemoryDumpRequestArgs arguments.
+MATCHER(IsDetailedDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
+}
+
+MATCHER(IsLightDump, "") {
+ return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
+}
+
+// Testing MemoryDumpManagerDelegate which, by default, short-circuits dump
+// requests locally to the MemoryDumpManager instead of performing IPC dances.
class MemoryDumpManagerDelegateForTesting : public MemoryDumpManagerDelegate {
public:
- void RequestGlobalMemoryDump(const MemoryDumpRequestArgs& args,
- const MemoryDumpCallback& callback) override {
- CreateProcessDump(args, callback);
+ MemoryDumpManagerDelegateForTesting() {
+ ON_CALL(*this, RequestGlobalMemoryDump(_, _))
+ .WillByDefault(Invoke(
+ this, &MemoryDumpManagerDelegateForTesting::CreateProcessDump));
+ }
+
+ MOCK_METHOD2(RequestGlobalMemoryDump,
+ void(const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback));
+
+ uint64 GetTracingProcessId() const override {
+ NOTREACHED();
+ return MemoryDumpManager::kInvalidTracingProcessId;
}
+};
- bool IsCoordinatorProcess() const override { return false; }
+class MockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+ MOCK_METHOD2(OnMemoryDump,
+ bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
};
class MemoryDumpManagerTest : public testing::Test {
public:
void SetUp() override {
+ last_callback_success_ = false;
message_loop_.reset(new MessageLoop());
mdm_.reset(new MemoryDumpManager());
MemoryDumpManager::SetInstanceForTesting(mdm_.get());
ASSERT_EQ(mdm_, MemoryDumpManager::GetInstance());
- MemoryDumpManager::GetInstance()->Initialize();
- MemoryDumpManager::GetInstance()->SetDelegate(&delegate_);
+ delegate_.reset(new MemoryDumpManagerDelegateForTesting);
}
void TearDown() override {
MemoryDumpManager::SetInstanceForTesting(nullptr);
mdm_.reset();
+ delegate_.reset();
message_loop_.reset();
TraceLog::DeleteForTesting();
}
@@ -57,198 +86,283 @@ class MemoryDumpManagerTest : public testing::Test {
Closure closure,
uint64 dump_guid,
bool success) {
+ last_callback_success_ = success;
task_runner->PostTask(FROM_HERE, closure);
}
protected:
- const char* kTraceCategory = MemoryDumpManager::kTraceCategoryForTesting;
-
- void EnableTracing(const char* category) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(category, ""), TraceLog::RECORDING_MODE);
+ void InitializeMemoryDumpManager(bool is_coordinator) {
+ mdm_->Initialize(delegate_.get(), is_coordinator);
}
- void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
-
- scoped_ptr<MemoryDumpManager> mdm_;
-
- private:
- scoped_ptr<MessageLoop> message_loop_;
- MemoryDumpManagerDelegateForTesting delegate_;
-
- // We want our singleton torn down after each test.
- ShadowingAtExitManager at_exit_manager_;
-};
-
-class MockDumpProvider : public MemoryDumpProvider {
- public:
- MockDumpProvider()
- : dump_provider_to_register_or_unregister(nullptr),
- last_session_state_(nullptr) {}
-
- // Ctor used by the RespectTaskRunnerAffinity test.
- explicit MockDumpProvider(
- const scoped_refptr<SingleThreadTaskRunner>& task_runner)
- : last_session_state_(nullptr), task_runner_(task_runner) {}
-
- virtual ~MockDumpProvider() {}
-
- MOCK_METHOD1(OnMemoryDump, bool(ProcessMemoryDump* pmd));
-
- // OnMemoryDump() override for the RespectTaskRunnerAffinity test.
- bool OnMemoryDump_CheckTaskRunner(ProcessMemoryDump* pmd) {
- EXPECT_TRUE(task_runner_->RunsTasksOnCurrentThread());
- return true;
+ void EnableTracingWithLegacyCategories(const char* category) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(category, ""),
+ TraceLog::RECORDING_MODE);
}
- // OnMemoryDump() override for the SharedSessionState test.
- bool OnMemoryDump_CheckSessionState(ProcessMemoryDump* pmd) {
- MemoryDumpSessionState* cur_session_state = pmd->session_state().get();
- if (last_session_state_)
- EXPECT_EQ(last_session_state_, cur_session_state);
- last_session_state_ = cur_session_state;
- return true;
+ void EnableTracingWithTraceConfig(const std::string& trace_config) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config),
+ TraceLog::RECORDING_MODE);
}
- // OnMemoryDump() override for the RegisterDumperWhileDumping test.
- bool OnMemoryDump_RegisterExtraDumpProvider(ProcessMemoryDump* pmd) {
- MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- dump_provider_to_register_or_unregister);
- return true;
+ void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); }
+
+ bool IsPeriodicDumpingEnabled() const {
+ return mdm_->periodic_dump_timer_.IsRunning();
}
- // OnMemoryDump() override for the UnegisterDumperWhileDumping test.
- bool OnMemoryDump_UnregisterDumpProvider(ProcessMemoryDump* pmd) {
- MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
- dump_provider_to_register_or_unregister);
- return true;
+ int GetMaxConsecutiveFailuresCount() const {
+ return MemoryDumpManager::kMaxConsecutiveFailuresCount;
}
- // Used by OnMemoryDump_(Un)RegisterExtraDumpProvider.
- MemoryDumpProvider* dump_provider_to_register_or_unregister;
+ scoped_ptr<MemoryDumpManager> mdm_;
+ scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+ bool last_callback_success_;
private:
- MemoryDumpSessionState* last_session_state_;
- scoped_refptr<SingleThreadTaskRunner> task_runner_;
+ scoped_ptr<MessageLoop> message_loop_;
+
+ // We want our singleton torn down after each test.
+ ShadowingAtExitManager at_exit_manager_;
};
+// Basic sanity checks. Registers a memory dump provider and checks that it is
+// called, but only when memory-infra is enabled.
TEST_F(MemoryDumpManagerTest, SingleDumper) {
- MockDumpProvider mdp;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
mdm_->RegisterDumpProvider(&mdp);
// Check that the dumper is not called if the memory category is not enabled.
- EnableTracing("foo-and-bar-but-not-memory");
- EXPECT_CALL(mdp, OnMemoryDump(_)).Times(0);
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ EnableTracingWithLegacyCategories("foobar-but-not-memory");
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
// Now repeat enabling the memory category and check that the dumper is
// invoked this time.
- EnableTracing(kTraceCategory);
- EXPECT_CALL(mdp, OnMemoryDump(_)).Times(3).WillRepeatedly(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(3).WillRepeatedly(Return(true));
for (int i = 0; i < 3; ++i)
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
mdm_->UnregisterDumpProvider(&mdp);
- // Finally check the unregister logic (no calls to the mdp after unregister).
- EnableTracing(kTraceCategory);
- EXPECT_CALL(mdp, OnMemoryDump(_)).Times(0);
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ // Finally check the unregister logic: the delegate will be invoked but not
+ // the dump provider, as it has been unregistered.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
TraceLog::GetInstance()->SetDisabled();
}
+// Checks that requesting dumps with high level of detail actually propagates
+// the level of the detail properly to OnMemoryDump() call on dump providers.
+TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
+
+ mdm_->RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ // Check that requesting dumps with low level of detail actually propagates to
+ // OnMemoryDump() call on dump providers.
+ mdm_->RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::LIGHT);
+ DisableTracing();
+ mdm_->UnregisterDumpProvider(&mdp);
+}
+
+// Checks that the SharedSessionState object is actually shared over time.
TEST_F(MemoryDumpManagerTest, SharedSessionState) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
mdm_->RegisterDumpProvider(&mdp1);
mdm_->RegisterDumpProvider(&mdp2);
- EnableTracing(kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_))
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ const MemoryDumpSessionState* session_state = mdm_->session_state().get();
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(2)
- .WillRepeatedly(
- Invoke(&mdp1, &MockDumpProvider::OnMemoryDump_CheckSessionState));
- EXPECT_CALL(mdp2, OnMemoryDump(_))
+ .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ EXPECT_EQ(session_state, pmd->session_state().get());
+ return true;
+ }));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
.Times(2)
- .WillRepeatedly(
- Invoke(&mdp2, &MockDumpProvider::OnMemoryDump_CheckSessionState));
+ .WillRepeatedly(Invoke([session_state](const MemoryDumpArgs&,
+ ProcessMemoryDump* pmd) -> bool {
+ EXPECT_EQ(session_state, pmd->session_state().get());
+ return true;
+ }));
for (int i = 0; i < 2; ++i)
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
+// Checks that the (Un)RegisterDumpProvider logic behaves sanely.
TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
// Enable only mdp1.
mdm_->RegisterDumpProvider(&mdp1);
- EnableTracing(kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_)).Times(1).WillRepeatedly(Return(true));
- EXPECT_CALL(mdp2, OnMemoryDump(_)).Times(0);
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
// Invert: enable mdp1 and disable mdp2.
mdm_->UnregisterDumpProvider(&mdp1);
mdm_->RegisterDumpProvider(&mdp2);
- EnableTracing(kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_)).Times(0);
- EXPECT_CALL(mdp2, OnMemoryDump(_)).Times(1).WillRepeatedly(Return(true));
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
// Enable both mdp1 and mdp2.
mdm_->RegisterDumpProvider(&mdp1);
- EnableTracing(kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_)).Times(1).WillRepeatedly(Return(true));
- EXPECT_CALL(mdp2, OnMemoryDump(_)).Times(1).WillRepeatedly(Return(true));
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _)).WillOnce(Return(true));
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
DisableTracing();
}
+// Checks that the dump provider invocations depend only on the current
+// registration state and not on previous registrations and dumps.
+TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp;
+
+ mdm_->RegisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+
+ mdm_->RegisterDumpProvider(&mdp);
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+
+ mdm_->RegisterDumpProvider(&mdp);
+ mdm_->UnregisterDumpProvider(&mdp);
+ mdm_->RegisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).WillOnce(Return(true));
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ DisableTracing();
+ }
+}
+
// Checks that the MemoryDumpManager respects the thread affinity when a
// MemoryDumpProvider specifies a task_runner(). The test starts creating 8
// threads and registering a MemoryDumpProvider on each of them. At each
// iteration, one thread is removed, to check the live unregistration logic.
TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
const uint32 kNumInitialThreads = 8;
ScopedVector<Thread> threads;
- ScopedVector<MockDumpProvider> mdps;
+ ScopedVector<MockMemoryDumpProvider> mdps;
// Create the threads and setup the expectations. Given that at each iteration
// we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
// invoked a number of times equal to its index.
for (uint32 i = kNumInitialThreads; i > 0; --i) {
- threads.push_back(new Thread("test thread"));
+ Thread* thread = new Thread("test thread");
+ threads.push_back(thread);
threads.back()->Start();
- mdps.push_back(new MockDumpProvider(threads.back()->task_runner()));
- MockDumpProvider* mdp = mdps.back();
- mdm_->RegisterDumpProvider(mdp, threads.back()->task_runner());
- EXPECT_CALL(*mdp, OnMemoryDump(_))
+ scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
+ MockMemoryDumpProvider* mdp = new MockMemoryDumpProvider();
+ mdps.push_back(mdp);
+ mdm_->RegisterDumpProvider(mdp, task_runner);
+ EXPECT_CALL(*mdp, OnMemoryDump(_, _))
.Times(i)
- .WillRepeatedly(
- Invoke(mdp, &MockDumpProvider::OnMemoryDump_CheckTaskRunner));
+ .WillRepeatedly(Invoke(
+ [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread());
+ return true;
+ }));
}
- EnableTracing(kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
while (!threads.empty()) {
+ last_callback_success_ = false;
{
RunLoop run_loop;
MemoryDumpCallback callback =
Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
MessageLoop::current()->task_runner(), run_loop.QuitClosure());
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED, callback);
- // This nested message loop (|run_loop|) will be quit if and only if
- // the RequestGlobalDump callback is invoked.
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ // This nested message loop (|run_loop|) will quit if and only if the
+ // |callback| passed to RequestGlobalDump() is invoked.
run_loop.Run();
}
+ EXPECT_TRUE(last_callback_success_);
// Unregister a MDP and destroy one thread at each iteration to check the
// live unregistration logic. The unregistration needs to happen on the same
@@ -270,89 +384,321 @@ TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
DisableTracing();
}
-// Enable both dump providers, make sure that mdp gets disabled after 3 failures
-// and not disabled after 1.
+// Checks that providers get disabled after 3 consecutive failures, but not
+// otherwise (e.g., if interleaved).
TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
mdm_->RegisterDumpProvider(&mdp1);
mdm_->RegisterDumpProvider(&mdp2);
- EnableTracing(kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(kNumDumps);
- EXPECT_CALL(mdp1, OnMemoryDump(_))
- .Times(MemoryDumpManager::kMaxConsecutiveFailuresCount)
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
+ .Times(GetMaxConsecutiveFailuresCount())
.WillRepeatedly(Return(false));
- EXPECT_CALL(mdp2, OnMemoryDump(_))
- .Times(1 + MemoryDumpManager::kMaxConsecutiveFailuresCount)
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
.WillOnce(Return(false))
- .WillRepeatedly(Return(true));
- for (int i = 0; i < 1 + MemoryDumpManager::kMaxConsecutiveFailuresCount;
- i++) {
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ .WillOnce(Return(true))
+ .WillOnce(Return(false))
+ .WillOnce(Return(false))
+ .WillOnce(Return(true))
+ .WillOnce(Return(false));
+
+ for (int i = 0; i < kNumDumps; i++) {
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
}
DisableTracing();
}
-// Sneakily register an extra memory dump provider while an existing one is
+// Sneakily registers an extra memory dump provider while an existing one is
// dumping and expect it to take part in the already active tracing session.
TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
- mdp1.dump_provider_to_register_or_unregister = &mdp2;
mdm_->RegisterDumpProvider(&mdp1);
- EnableTracing(kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
- EXPECT_CALL(mdp1, OnMemoryDump(_))
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(4)
.WillOnce(Return(true))
- .WillOnce(Invoke(
- &mdp1, &MockDumpProvider::OnMemoryDump_RegisterExtraDumpProvider))
+ .WillOnce(
+ Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ MemoryDumpManager::GetInstance()->RegisterDumpProvider(&mdp2);
+ return true;
+ }))
.WillRepeatedly(Return(true));
// Depending on the insertion order (before or after mdp1), mdp2 might be
// called also immediately after it gets registered.
- EXPECT_CALL(mdp2, OnMemoryDump(_))
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
.Times(Between(2, 3))
.WillRepeatedly(Return(true));
for (int i = 0; i < 4; i++) {
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
}
DisableTracing();
}
-// Like the above, but suddenly unregister the dump provider.
+// Like RegisterDumperWhileDumping, but unregister the dump provider instead.
TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
- MockDumpProvider mdp1;
- MockDumpProvider mdp2;
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ MockMemoryDumpProvider mdp2;
mdm_->RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
mdm_->RegisterDumpProvider(&mdp2, ThreadTaskRunnerHandle::Get());
- mdp1.dump_provider_to_register_or_unregister = &mdp2;
- EnableTracing(kTraceCategory);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
- EXPECT_CALL(mdp1, OnMemoryDump(_))
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
+
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _))
.Times(4)
.WillOnce(Return(true))
- .WillOnce(Invoke(&mdp1,
- &MockDumpProvider::OnMemoryDump_UnregisterDumpProvider))
+ .WillOnce(
+ Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
+ MemoryDumpManager::GetInstance()->UnregisterDumpProvider(&mdp2);
+ return true;
+ }))
.WillRepeatedly(Return(true));
// Depending on the insertion order (before or after mdp1), mdp2 might have
- // been already called when OnMemoryDump_UnregisterDumpProvider happens.
- EXPECT_CALL(mdp2, OnMemoryDump(_))
+ // been already called when UnregisterDumpProvider happens.
+ EXPECT_CALL(mdp2, OnMemoryDump(_, _))
.Times(Between(1, 2))
.WillRepeatedly(Return(true));
for (int i = 0; i < 4; i++) {
- mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ }
+
+ DisableTracing();
+}
+
+// Checks that the dump does not abort when unregistering a provider while
+// dumping from a different thread than the dumping thread.
+TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ ScopedVector<TestIOThread> threads;
+ ScopedVector<MockMemoryDumpProvider> mdps;
+
+ for (int i = 0; i < 2; i++) {
+ threads.push_back(new TestIOThread(TestIOThread::kAutoStart));
+ mdps.push_back(new MockMemoryDumpProvider());
+ mdm_->RegisterDumpProvider(mdps.back(), threads.back()->task_runner());
+ }
+
+ int on_memory_dump_call_count = 0;
+ RunLoop run_loop;
+
+ // When OnMemoryDump is called on either of the dump providers, it will
+ // unregister the other one.
+ for (MockMemoryDumpProvider* mdp : mdps) {
+ int other_idx = (mdps.front() == mdp);
+ TestIOThread* other_thread = threads[other_idx];
+ MockMemoryDumpProvider* other_mdp = mdps[other_idx];
+ auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
+ const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+ other_thread->PostTaskAndWait(
+ FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
+ base::Unretained(&*mdm_), other_mdp));
+ on_memory_dump_call_count++;
+ return true;
+ };
+
+ // OnMemoryDump is called once for the provider that dumps first, and zero
+ // times for the other provider.
+ EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+ .Times(AtMost(1))
+ .WillOnce(Invoke(on_dump));
+ }
+
+ last_callback_success_ = false;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+
+ run_loop.Run();
+
+ ASSERT_EQ(1, on_memory_dump_call_count);
+ ASSERT_EQ(true, last_callback_success_);
+
+ DisableTracing();
+}
+
+// Checks that a NACK callback is invoked if RequestGlobalDump() is called when
+// tracing is not enabled.
+TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ mdm_->RegisterDumpProvider(&mdp1);
+
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+
+ last_callback_success_ = true;
+ {
+ RunLoop run_loop;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ run_loop.Run();
+ }
+ EXPECT_FALSE(last_callback_success_);
+}
+
+// Checks that if the MemoryDumpManager is initialized after tracing already
+// began, it will still late-join the party (real use case: startup tracing).
+TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
+ MockMemoryDumpProvider mdp;
+ mdm_->RegisterDumpProvider(&mdp);
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ // First check that a RequestGlobalDump() issued before the MemoryDumpManager
+ // initialization gets NACK-ed cleanly.
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
+ RunLoop run_loop;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ run_loop.Run();
+ EXPECT_FALSE(last_callback_success_);
+ }
+
+ // Now late-initialize the MemoryDumpManager and check that the
+ // RequestGlobalDump completes successfully.
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ RunLoop run_loop;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED, callback);
+ run_loop.Run();
+ EXPECT_TRUE(last_callback_success_);
}
+ DisableTracing();
+}
+
+// This test (and the MemoryDumpManagerTestCoordinator below) crystallizes the
+// expectations of the chrome://tracing UI and chrome telemetry w.r.t. periodic
+// dumps in memory-infra, handling gracefully the transition between the legacy
+// and the new-style (JSON-based) TraceConfig.
+TEST_F(MemoryDumpManagerTest, TraceConfigExpectations) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
+
+ // Don't trigger the default behavior of the mock delegate in this test,
+ // which would short-circuit the dump request to the actual
+ // CreateProcessDump().
+ // We don't want to create any dump in this test, only check whether the dumps
+ // are requested or not.
+ ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
+
+ // Enabling memory-infra in a non-coordinator process should not trigger any
+ // periodic dumps.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a non-coordinator
+ // process with a fully defined trigger config should NOT enable any periodic
+ // dumps.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(1, 5));
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+}
+
+TEST_F(MemoryDumpManagerTest, TraceConfigExpectationsWhenIsCoordinator) {
+ InitializeMemoryDumpManager(true /* is_coordinator */);
+ MemoryDumpManagerDelegateForTesting& delegate = *delegate_;
+ ON_CALL(delegate, RequestGlobalMemoryDump(_, _)).WillByDefault(Return());
+
+ // Enabling memory-infra with the legacy TraceConfig (category filter) in
+ // a coordinator process should enable periodic dumps.
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+ EXPECT_TRUE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process without specifying any "memory_dump_config" section should enable
+// periodic dumps. This is to preserve the behavior of the chrome://tracing UI,
+ // is: ticking memory-infra should dump periodically with the default config.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_NoTriggers());
+ EXPECT_TRUE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process with an empty "memory_dump_config" should NOT enable periodic
+ // dumps. This is the way telemetry is supposed to use memory-infra with
+ // only explicitly triggered dumps.
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+ EXPECT_FALSE(IsPeriodicDumpingEnabled());
+ DisableTracing();
+ // Enabling memory-infra with the new (JSON) TraceConfig in a coordinator
+ // process with a fully defined trigger config should cause periodic dumps to
+ // be performed in the correct order.
+ RunLoop run_loop;
+ auto quit_closure = run_loop.QuitClosure();
+
+ const int kHeavyDumpRate = 5;
+ const int kLightDumpPeriodMs = 1;
+ const int kHeavyDumpPeriodMs = kHeavyDumpRate * kLightDumpPeriodMs;
+ // The expected sequence with light=1ms, heavy=5ms is H,L,L,L,L,H,...
+ testing::InSequence sequence;
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .Times(kHeavyDumpRate - 1);
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsDetailedDump(), _));
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .Times(kHeavyDumpRate - 2);
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(IsLightDump(), _))
+ .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+ const MemoryDumpCallback& callback) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+ }));
+
+ // Swallow all the final spurious calls until tracing gets disabled.
+ EXPECT_CALL(delegate, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(
+ kLightDumpPeriodMs, kHeavyDumpPeriodMs));
+ run_loop.Run();
DisableTracing();
}
diff --git a/chromium/base/trace_event/memory_dump_provider.h b/chromium/base/trace_event/memory_dump_provider.h
index 6e6551cc039..3b1f13623a7 100644
--- a/chromium/base/trace_event/memory_dump_provider.h
+++ b/chromium/base/trace_event/memory_dump_provider.h
@@ -7,21 +7,31 @@
#include "base/base_export.h"
#include "base/macros.h"
+#include "base/trace_event/memory_dump_request_args.h"
namespace base {
namespace trace_event {
class ProcessMemoryDump;
+// Args passed to OnMemoryDump(). This is to avoid rewriting all the subclasses
+// in the codebase when extending the MemoryDumpProvider API.
+struct MemoryDumpArgs {
+ MemoryDumpLevelOfDetail level_of_detail;
+};
+
// The contract interface that memory dump providers must implement.
class BASE_EXPORT MemoryDumpProvider {
public:
// Called by the MemoryDumpManager when generating memory dumps.
- // The embedder should return true if the |pmd| was successfully populated,
- // false if something went wrong and the dump should be considered invalid.
+ // The |args| specify if the embedder should generate light/heavy dumps on
+ // dump requests. The embedder should return true if the |pmd| was
+ // successfully populated, false if something went wrong and the dump should
+ // be considered invalid.
// (Note, the MemoryDumpManager has a fail-safe logic which will disable the
// MemoryDumpProvider for the entire trace session if it fails consistently).
- virtual bool OnMemoryDump(ProcessMemoryDump* pmd) = 0;
+ virtual bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) = 0;
protected:
MemoryDumpProvider() {}
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index a24bc72671d..48b5ba6d2c0 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -10,22 +10,41 @@ namespace base {
namespace trace_event {
// static
-const char* MemoryDumpTypeToString(
- const MemoryDumpType& dump_type) {
+const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
switch (dump_type) {
case MemoryDumpType::TASK_BEGIN:
- return "TASK_BEGIN";
+ return "task_begin";
case MemoryDumpType::TASK_END:
- return "TASK_END";
+ return "task_end";
case MemoryDumpType::PERIODIC_INTERVAL:
- return "PERIODIC_INTERVAL";
- case MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS:
- return "PERIODIC_INTERVAL_WITH_MMAPS";
+ return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
- return "EXPLICITLY_TRIGGERED";
+ return "explicitly_triggered";
}
NOTREACHED();
- return "UNKNOWN";
+ return "unknown";
+}
+
+const char* MemoryDumpLevelOfDetailToString(
+ const MemoryDumpLevelOfDetail& level_of_detail) {
+ switch (level_of_detail) {
+ case MemoryDumpLevelOfDetail::LIGHT:
+ return "light";
+ case MemoryDumpLevelOfDetail::DETAILED:
+ return "detailed";
+ }
+ NOTREACHED();
+ return "unknown";
+}
+
+MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
+ const std::string& str) {
+ if (str == "light")
+ return MemoryDumpLevelOfDetail::LIGHT;
+ if (str == "detailed")
+ return MemoryDumpLevelOfDetail::DETAILED;
+ NOTREACHED();
+ return MemoryDumpLevelOfDetail::LAST;
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index 747daabf9ed..d1bb6c3c51b 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -8,6 +8,8 @@
// This file defines the types and structs used to issue memory dump requests.
// These are also used in the IPCs for coordinating inter-process memory dumps.
+#include <string>
+
#include "base/base_export.h"
#include "base/callback.h"
@@ -20,17 +22,22 @@ enum class MemoryDumpType {
TASK_BEGIN, // Dumping memory at the beginning of a message-loop task.
TASK_END, // Dumping memory at the ending of a message-loop task.
PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
- PERIODIC_INTERVAL_WITH_MMAPS, // As above but w/ heavyweight mmaps dumps.
- // Temporary workaround for crbug.com/499731.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
LAST = EXPLICITLY_TRIGGERED // For IPC macros.
};
-// Returns the name in string for the dump type given.
-BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
-
-using MemoryDumpCallback = Callback<void(uint64 dump_guid, bool success)>;
+// Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
+// MemoryDumpProvider instances must guarantee that level of detail does not
+// affect the total size reported in the root node, but only the granularity of
+// the child MemoryAllocatorDump(s).
+enum class MemoryDumpLevelOfDetail {
+ LIGHT, // Few entries, typically a fixed number, per dump.
+ DETAILED, // Unrestricted amount of entries per dump.
+  LAST = DETAILED // For IPC macros.
+};
+// Initial request arguments for a global memory dump. (see
+// MemoryDumpManager::RequestGlobalMemoryDump()).
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -38,8 +45,19 @@ struct BASE_EXPORT MemoryDumpRequestArgs {
uint64 dump_guid;
MemoryDumpType dump_type;
+ MemoryDumpLevelOfDetail level_of_detail;
};
+using MemoryDumpCallback = Callback<void(uint64 dump_guid, bool success)>;
+
+BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+
+BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
+ const MemoryDumpLevelOfDetail& level_of_detail);
+
+BASE_EXPORT MemoryDumpLevelOfDetail
+StringToMemoryDumpLevelOfDetail(const std::string& str);
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/memory_profiler_allocation_context.cc b/chromium/base/trace_event/memory_profiler_allocation_context.cc
new file mode 100644
index 00000000000..3aea93518c6
--- /dev/null
+++ b/chromium/base/trace_event/memory_profiler_allocation_context.cc
@@ -0,0 +1,91 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_profiler_allocation_context.h"
+
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace trace_event {
+
+subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0;
+
+namespace {
+ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
+}
+
+AllocationStack::AllocationStack() {}
+AllocationStack::~AllocationStack() {}
+
+// This function is added to the TLS slot to clean up the instance when the
+// thread exits.
+void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
+ delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
+}
+
+AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() {
+ AllocationContextTracker* tracker;
+
+ if (g_tls_alloc_ctx_tracker.initialized()) {
+ tracker =
+ static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
+ } else {
+ tracker = new AllocationContextTracker();
+ g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
+ g_tls_alloc_ctx_tracker.Set(tracker);
+ }
+
+ return tracker;
+}
+
+AllocationContextTracker::AllocationContextTracker() {}
+AllocationContextTracker::~AllocationContextTracker() {}
+
+// static
+void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
+ // There is no memory barrier here for performance reasons, a little lag is
+ // not an issue.
+ subtle::NoBarrier_Store(&capture_enabled_, enabled);
+}
+
+// static
+void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) {
+ auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+ tracker->pseudo_stack_.push(frame);
+}
+
+// static
+void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) {
+ auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+ DCHECK_EQ(frame, *tracker->pseudo_stack_.top());
+ tracker->pseudo_stack_.pop();
+}
+
+// static
+void AllocationContextTracker::SetContextField(const char* key,
+ const char* value) {
+ auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+ tracker->context_[key] = value;
+}
+
+// static
+void AllocationContextTracker::UnsetContextField(const char* key) {
+ auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+ tracker->context_.erase(key);
+}
+
+// static
+AllocationStack* AllocationContextTracker::GetPseudoStackForTesting() {
+ auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+ return &tracker->pseudo_stack_;
+}
+
+// static
+AllocationContext AllocationContextTracker::GetContext() {
+ // TODO(ruuda): Implement this in a follow-up CL.
+ return AllocationContext();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/memory_profiler_allocation_context.h b/chromium/base/trace_event/memory_profiler_allocation_context.h
new file mode 100644
index 00000000000..11ecc881baf
--- /dev/null
+++ b/chromium/base/trace_event/memory_profiler_allocation_context.h
@@ -0,0 +1,119 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_CONTEXT_H_
+#define BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_CONTEXT_H_
+
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/containers/small_map.h"
+
+namespace base {
+namespace trace_event {
+
+// When heap profiling is enabled, tracing keeps track of the allocation
+// context for each allocation intercepted. It is generated by the
+// |AllocationContextTracker| which keeps stacks of context in TLS.
+// The tracker is initialized lazily.
+
+using StackFrame = const char*;
+
+// A simple stack of |StackFrame| that unlike |std::stack| allows iterating
+// the stack and guards for underflow.
+class BASE_EXPORT AllocationStack {
+ public:
+ // Incrementing the iterator iterates down the stack.
+ using ConstIterator = std::vector<StackFrame>::const_reverse_iterator;
+
+ AllocationStack();
+ ~AllocationStack();
+
+ inline ConstIterator top() const { return stack_.rbegin(); }
+ inline ConstIterator bottom() const { return stack_.rend(); }
+
+ inline void push(StackFrame frame) {
+ // Impose a limit on the height to verify that every push is popped, because
+ // in practice the pseudo stack never grows higher than ~20 frames.
+ DCHECK_LT(stack_.size(), 128u);
+ stack_.push_back(frame);
+ }
+
+ inline void pop() {
+ if (!stack_.empty())
+ stack_.pop_back();
+ }
+
+ private:
+ std::vector<StackFrame> stack_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationStack);
+};
+
+class BASE_EXPORT AllocationContext {
+ // TODO(ruuda): Fill this in a follow-up CL.
+};
+
+// The allocation context tracker keeps track of thread-local context for heap
+// profiling. It includes a pseudo stack of trace events, and it might contain
+// arbitrary (key, value) context. On every allocation the tracker provides a
+// snapshot of its context in the form of an |AllocationContext| that is to be
+// stored together with the allocation details.
+class BASE_EXPORT AllocationContextTracker {
+ public:
+ // Globally enables capturing allocation context.
+ // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future?
+ // Or at least have something that guards agains enable -> disable -> enable?
+ static void SetCaptureEnabled(bool enabled);
+
+ // Returns whether capturing allocation context is enabled globally.
+ inline static bool capture_enabled() {
+ // A little lag after heap profiling is enabled or disabled is fine, it is
+ // more important that the check is as cheap as possible when capturing is
+ // not enabled, so do not issue a memory barrier.
+ return subtle::NoBarrier_Load(&capture_enabled_) != 0;
+ }
+
+ // Pushes a frame onto the thread-local pseudo stack.
+ static void PushPseudoStackFrame(StackFrame frame);
+
+ // Pops a frame from the thread-local pseudo stack.
+ static void PopPseudoStackFrame(StackFrame frame);
+
+ // Sets a thread-local (key, value) pair.
+ static void SetContextField(const char* key, const char* value);
+
+ // Removes the (key, value) pair with the specified key from the thread-local
+ // context.
+ static void UnsetContextField(const char* key);
+
+ // Returns a snapshot of the current thread-local context.
+ static AllocationContext GetContext();
+
+ // TODO(ruuda): Remove in a follow-up CL, this is only used for testing now.
+ static AllocationStack* GetPseudoStackForTesting();
+
+ ~AllocationContextTracker();
+
+ private:
+ AllocationContextTracker();
+
+ static AllocationContextTracker* GetThreadLocalTracker();
+
+ static subtle::Atomic32 capture_enabled_;
+
+ // The pseudo stack where frames are |TRACE_EVENT| names.
+ AllocationStack pseudo_stack_;
+
+ // A dictionary of arbitrary context.
+ SmallMap<std::map<const char*, const char*>> context_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_CONTEXT_H_
diff --git a/chromium/base/trace_event/memory_profiler_allocation_context_unittest.cc b/chromium/base/trace_event/memory_profiler_allocation_context_unittest.cc
new file mode 100644
index 00000000000..a5b7244717a
--- /dev/null
+++ b/chromium/base/trace_event/memory_profiler_allocation_context_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_profiler_allocation_context.h"
+#include "base/trace_event/trace_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Define all strings once, because the pseudo stack requires pointer equality,
+// and string interning is unreliable.
+const char kCupcake[] = "Cupcake";
+const char kDonut[] = "Donut";
+const char kEclair[] = "Eclair";
+const char kFroyo[] = "Froyo";
+const char kGingerbread[] = "Gingerbread";
+const char kHoneycomb[] = "Honeycomb";
+
+// Asserts that the fixed-size array |expected_stack| matches the pseudo
+// stack. Syntax note: |const StackFrame (&expected_stack)[N]| is the syntax
+// for "expected_stack is a reference to a const fixed-size array of StackFrame
+// of length N".
+template <size_t N>
+void AssertPseudoStackEquals(const StackFrame(&expected_stack)[N]) {
+ auto pseudo_stack = AllocationContextTracker::GetPseudoStackForTesting();
+ auto actual = pseudo_stack->top();
+ auto actual_bottom = pseudo_stack->bottom();
+ auto expected = expected_stack;
+ auto expected_bottom = expected_stack + N;
+
+ // Note that this requires the pointers to be equal, this is not doing a deep
+ // string comparison.
+ for (; actual != actual_bottom && expected != expected_bottom;
+ actual++, expected++)
+ ASSERT_EQ(*expected, *actual);
+
+ // Ensure that the height of the stacks is the same.
+ ASSERT_EQ(actual, actual_bottom);
+ ASSERT_EQ(expected, expected_bottom);
+}
+
+void AssertPseudoStackEmpty() {
+ auto pseudo_stack = AllocationContextTracker::GetPseudoStackForTesting();
+ ASSERT_EQ(pseudo_stack->top(), pseudo_stack->bottom());
+}
+
+class AllocationContextTest : public testing::Test {
+ public:
+ void EnableTracing() {
+ TraceConfig config("");
+ TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
+ AllocationContextTracker::SetCaptureEnabled(true);
+ }
+
+ void DisableTracing() {
+ AllocationContextTracker::SetCaptureEnabled(false);
+ TraceLog::GetInstance()->SetDisabled();
+ }
+};
+
+TEST_F(AllocationContextTest, PseudoStackScopedTrace) {
+ StackFrame c = kCupcake;
+ StackFrame d = kDonut;
+ StackFrame e = kEclair;
+ StackFrame f = kFroyo;
+
+ EnableTracing();
+ AssertPseudoStackEmpty();
+
+ {
+ TRACE_EVENT0("Testing", kCupcake);
+ StackFrame frame_c[] = {c};
+ AssertPseudoStackEquals(frame_c);
+
+ {
+ TRACE_EVENT0("Testing", kDonut);
+ StackFrame frame_dc[] = {d, c};
+ AssertPseudoStackEquals(frame_dc);
+ }
+
+ AssertPseudoStackEquals(frame_c);
+
+ {
+ TRACE_EVENT0("Testing", kEclair);
+ StackFrame frame_ec[] = {e, c};
+ AssertPseudoStackEquals(frame_ec);
+ }
+
+ AssertPseudoStackEquals(frame_c);
+ }
+
+ AssertPseudoStackEmpty();
+
+ {
+ TRACE_EVENT0("Testing", kFroyo);
+ StackFrame frame_f[] = {f};
+ AssertPseudoStackEquals(frame_f);
+ }
+
+ AssertPseudoStackEmpty();
+ DisableTracing();
+}
+
+TEST_F(AllocationContextTest, PseudoStackBeginEndTrace) {
+ StackFrame c = kCupcake;
+ StackFrame d = kDonut;
+ StackFrame e = kEclair;
+ StackFrame f = kFroyo;
+
+ StackFrame frame_c[] = {c};
+ StackFrame frame_dc[] = {d, c};
+ StackFrame frame_ec[] = {e, c};
+ StackFrame frame_f[] = {f};
+
+ EnableTracing();
+ AssertPseudoStackEmpty();
+
+ TRACE_EVENT_BEGIN0("Testing", kCupcake);
+ AssertPseudoStackEquals(frame_c);
+
+ TRACE_EVENT_BEGIN0("Testing", kDonut);
+ AssertPseudoStackEquals(frame_dc);
+ TRACE_EVENT_END0("Testing", kDonut);
+
+ AssertPseudoStackEquals(frame_c);
+
+ TRACE_EVENT_BEGIN0("Testing", kEclair);
+ AssertPseudoStackEquals(frame_ec);
+ TRACE_EVENT_END0("Testing", kEclair);
+
+ AssertPseudoStackEquals(frame_c);
+ TRACE_EVENT_END0("Testing", kCupcake);
+
+ AssertPseudoStackEmpty();
+
+ TRACE_EVENT_BEGIN0("Testing", kFroyo);
+ AssertPseudoStackEquals(frame_f);
+ TRACE_EVENT_END0("Testing", kFroyo);
+
+ AssertPseudoStackEmpty();
+ DisableTracing();
+}
+
+TEST_F(AllocationContextTest, PseudoStackMixedTrace) {
+ StackFrame c = kCupcake;
+ StackFrame d = kDonut;
+ StackFrame e = kEclair;
+ StackFrame f = kFroyo;
+
+ StackFrame frame_c[] = {c};
+ StackFrame frame_dc[] = {d, c};
+ StackFrame frame_e[] = {e};
+ StackFrame frame_fe[] = {f, e};
+
+ EnableTracing();
+ AssertPseudoStackEmpty();
+
+ TRACE_EVENT_BEGIN0("Testing", kCupcake);
+ AssertPseudoStackEquals(frame_c);
+
+ {
+ TRACE_EVENT0("Testing", kDonut);
+ AssertPseudoStackEquals(frame_dc);
+ }
+
+ AssertPseudoStackEquals(frame_c);
+ TRACE_EVENT_END0("Testing", kCupcake);
+ AssertPseudoStackEmpty();
+
+ {
+ TRACE_EVENT0("Testing", kEclair);
+ AssertPseudoStackEquals(frame_e);
+
+ TRACE_EVENT_BEGIN0("Testing", kFroyo);
+ AssertPseudoStackEquals(frame_fe);
+ TRACE_EVENT_END0("Testing", kFroyo);
+ AssertPseudoStackEquals(frame_e);
+ }
+
+ AssertPseudoStackEmpty();
+ DisableTracing();
+}
+
+TEST_F(AllocationContextTest, PseudoStackEnableWithEventInScope) {
+ StackFrame h = kHoneycomb;
+
+ {
+ TRACE_EVENT0("Testing", kGingerbread);
+ EnableTracing();
+ AssertPseudoStackEmpty();
+
+ {
+ TRACE_EVENT0("Testing", kHoneycomb);
+ StackFrame frame_h[] = {h};
+ AssertPseudoStackEquals(frame_h);
+ }
+
+ AssertPseudoStackEmpty();
+
+ // The pop at the end of this scope for the 'Gingerbread' frame must not
+ // cause a stack underflow.
+ }
+ AssertPseudoStackEmpty();
+ DisableTracing();
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 46ae1fc6118..67118f10e8c 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -58,7 +58,12 @@ MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
- return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+ // A shared allocator dump can be shared within a process and the guid could
+ // have been created already.
+ MemoryAllocatorDump* allocator_dump = GetSharedGlobalAllocatorDump(guid);
+ return allocator_dump ? allocator_dump
+ : CreateAllocatorDump(
+ GetSharedGlobalAllocatorDumpName(guid), guid);
}
MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index 3b71a2c41ae..da18a14b1ca 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -25,12 +25,8 @@ class ConvertableToTraceFormat;
class MemoryDumpManager;
class MemoryDumpSessionState;
-// ProcessMemoryDump is as a strongly typed container which enforces the data
-// model for each memory dump and holds the dumps produced by the
-// MemoryDumpProvider(s) for a specific process.
-// At trace generation time (i.e. when AsValue() is called), ProcessMemoryDump
-// will compose a key-value dictionary of the various dumps obtained at trace
-// dump point time.
+// ProcessMemoryDump is as a strongly typed container which holds the dumps
+// produced by the MemoryDumpProvider(s) for a specific process.
class BASE_EXPORT ProcessMemoryDump {
public:
struct MemoryAllocatorDumpEdge {
diff --git a/chromium/base/trace_event/process_memory_maps_dump_provider.cc b/chromium/base/trace_event/process_memory_maps_dump_provider.cc
index e0ae20d2a5b..38b2573e6b6 100644
--- a/chromium/base/trace_event/process_memory_maps_dump_provider.cc
+++ b/chromium/base/trace_event/process_memory_maps_dump_provider.cc
@@ -163,9 +163,14 @@ ProcessMemoryMapsDumpProvider::ProcessMemoryMapsDumpProvider() {
ProcessMemoryMapsDumpProvider::~ProcessMemoryMapsDumpProvider() {
}
-// Called at trace dump point time. Creates a snapshot the memory maps for the
-// current process.
-bool ProcessMemoryMapsDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
+// Called at trace dump point time. Creates a snapshot of the memory maps for
+// the current process.
+bool ProcessMemoryMapsDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
+ // Snapshot of memory maps is not taken for light dump requests.
+ if (args.level_of_detail == MemoryDumpLevelOfDetail::LIGHT)
+ return true;
+
uint32 res = 0;
#if defined(OS_LINUX) || defined(OS_ANDROID)
diff --git a/chromium/base/trace_event/process_memory_maps_dump_provider.h b/chromium/base/trace_event/process_memory_maps_dump_provider.h
index c73c4d2be66..5a0f84cbb66 100644
--- a/chromium/base/trace_event/process_memory_maps_dump_provider.h
+++ b/chromium/base/trace_event/process_memory_maps_dump_provider.h
@@ -20,7 +20,8 @@ class BASE_EXPORT ProcessMemoryMapsDumpProvider : public MemoryDumpProvider {
static ProcessMemoryMapsDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
private:
friend struct DefaultSingletonTraits<ProcessMemoryMapsDumpProvider>;
diff --git a/chromium/base/trace_event/process_memory_maps_dump_provider_unittest.cc b/chromium/base/trace_event/process_memory_maps_dump_provider_unittest.cc
index 5416e119960..a73a21c772e 100644
--- a/chromium/base/trace_event/process_memory_maps_dump_provider_unittest.cc
+++ b/chromium/base/trace_event/process_memory_maps_dump_provider_unittest.cc
@@ -110,6 +110,7 @@ TEST(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps) {
const uint32 kProtR = ProcessMemoryMaps::VMRegion::kProtectionFlagsRead;
const uint32 kProtW = ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite;
const uint32 kProtX = ProcessMemoryMaps::VMRegion::kProtectionFlagsExec;
+ const MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
auto pmmdp = ProcessMemoryMapsDumpProvider::GetInstance();
@@ -118,21 +119,21 @@ TEST(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps) {
std::ifstream non_existent_file("/tmp/does-not-exist");
ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = &non_existent_file;
CHECK_EQ(false, non_existent_file.good());
- pmmdp->OnMemoryDump(&pmd_invalid);
+ pmmdp->OnMemoryDump(dump_args, &pmd_invalid);
ASSERT_FALSE(pmd_invalid.has_process_mmaps());
// Emulate an empty /proc/self/smaps.
std::ifstream empty_file("/dev/null");
ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = &empty_file;
CHECK_EQ(true, empty_file.good());
- pmmdp->OnMemoryDump(&pmd_invalid);
+ pmmdp->OnMemoryDump(dump_args, &pmd_invalid);
ASSERT_FALSE(pmd_invalid.has_process_mmaps());
// Parse the 1st smaps file.
ProcessMemoryDump pmd_1(nullptr /* session_state */);
std::istringstream test_smaps_1(kTestSmaps1);
ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = &test_smaps_1;
- pmmdp->OnMemoryDump(&pmd_1);
+ pmmdp->OnMemoryDump(dump_args, &pmd_1);
ASSERT_TRUE(pmd_1.has_process_mmaps());
const auto& regions_1 = pmd_1.process_mmaps()->vm_regions();
ASSERT_EQ(2UL, regions_1.size());
@@ -163,7 +164,7 @@ TEST(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps) {
ProcessMemoryDump pmd_2(nullptr /* session_state */);
std::istringstream test_smaps_2(kTestSmaps2);
ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = &test_smaps_2;
- pmmdp->OnMemoryDump(&pmd_2);
+ pmmdp->OnMemoryDump(dump_args, &pmd_2);
ASSERT_TRUE(pmd_2.has_process_mmaps());
const auto& regions_2 = pmd_2.process_mmaps()->vm_regions();
ASSERT_EQ(1UL, regions_2.size());
diff --git a/chromium/base/trace_event/process_memory_totals_dump_provider.cc b/chromium/base/trace_event/process_memory_totals_dump_provider.cc
index 37f9bed68c9..a8617207dcf 100644
--- a/chromium/base/trace_event/process_memory_totals_dump_provider.cc
+++ b/chromium/base/trace_event/process_memory_totals_dump_provider.cc
@@ -53,7 +53,8 @@ ProcessMemoryTotalsDumpProvider::~ProcessMemoryTotalsDumpProvider() {
// Called at trace dump point time. Creates a snapshot the memory counters for
// the current process.
-bool ProcessMemoryTotalsDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
+bool ProcessMemoryTotalsDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
const uint64 rss_bytes = rss_bytes_for_testing
? rss_bytes_for_testing
: process_metrics_->GetWorkingSetSize();
diff --git a/chromium/base/trace_event/process_memory_totals_dump_provider.h b/chromium/base/trace_event/process_memory_totals_dump_provider.h
index 6c86eb6b4a1..66d4f9956dd 100644
--- a/chromium/base/trace_event/process_memory_totals_dump_provider.h
+++ b/chromium/base/trace_event/process_memory_totals_dump_provider.h
@@ -22,7 +22,8 @@ class BASE_EXPORT ProcessMemoryTotalsDumpProvider : public MemoryDumpProvider {
static ProcessMemoryTotalsDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
private:
friend struct DefaultSingletonTraits<ProcessMemoryTotalsDumpProvider>;
diff --git a/chromium/base/trace_event/process_memory_totals_dump_provider_unittest.cc b/chromium/base/trace_event/process_memory_totals_dump_provider_unittest.cc
index f9bb6c071ce..4ec37f57653 100644
--- a/chromium/base/trace_event/process_memory_totals_dump_provider_unittest.cc
+++ b/chromium/base/trace_event/process_memory_totals_dump_provider_unittest.cc
@@ -12,18 +12,19 @@ namespace base {
namespace trace_event {
TEST(ProcessMemoryTotalsDumpProviderTest, DumpRSS) {
+ const MemoryDumpArgs high_detail_args = {MemoryDumpLevelOfDetail::DETAILED};
auto pmtdp = ProcessMemoryTotalsDumpProvider::GetInstance();
scoped_ptr<ProcessMemoryDump> pmd_before(new ProcessMemoryDump(nullptr));
scoped_ptr<ProcessMemoryDump> pmd_after(new ProcessMemoryDump(nullptr));
ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 1024;
- pmtdp->OnMemoryDump(pmd_before.get());
+ pmtdp->OnMemoryDump(high_detail_args, pmd_before.get());
// Pretend that the RSS of the process increased of +1M.
const size_t kAllocSize = 1048576;
ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing += kAllocSize;
- pmtdp->OnMemoryDump(pmd_after.get());
+ pmtdp->OnMemoryDump(high_detail_args, pmd_after.get());
ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 0;
diff --git a/chromium/base/trace_event/trace_buffer.cc b/chromium/base/trace_event/trace_buffer.cc
new file mode 100644
index 00000000000..a2e4f141ef4
--- /dev/null
+++ b/chromium/base/trace_event/trace_buffer.cc
@@ -0,0 +1,396 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_buffer.h"
+
+#include "base/memory/scoped_vector.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+class TraceBufferRingBuffer : public TraceBuffer {
+ public:
+ TraceBufferRingBuffer(size_t max_chunks)
+ : max_chunks_(max_chunks),
+ recyclable_chunks_queue_(new size_t[queue_capacity()]),
+ queue_head_(0),
+ queue_tail_(max_chunks),
+ current_iteration_index_(0),
+ current_chunk_seq_(1) {
+ chunks_.reserve(max_chunks);
+ for (size_t i = 0; i < max_chunks; ++i)
+ recyclable_chunks_queue_[i] = i;
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ // Because the number of threads is much less than the number of chunks,
+ // the queue should never be empty.
+ DCHECK(!QueueIsEmpty());
+
+ *index = recyclable_chunks_queue_[queue_head_];
+ queue_head_ = NextQueueIndex(queue_head_);
+ current_iteration_index_ = queue_head_;
+
+ if (*index >= chunks_.size())
+ chunks_.resize(*index + 1);
+
+ TraceBufferChunk* chunk = chunks_[*index];
+ chunks_[*index] = NULL; // Put NULL in the slot of a in-flight chunk.
+ if (chunk)
+ chunk->Reset(current_chunk_seq_++);
+ else
+ chunk = new TraceBufferChunk(current_chunk_seq_++);
+
+ return scoped_ptr<TraceBufferChunk>(chunk);
+ }
+
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+ // When this method is called, the queue should not be full because it
+ // can contain all chunks including the one to be returned.
+ DCHECK(!QueueIsFull());
+ DCHECK(chunk);
+ DCHECK_LT(index, chunks_.size());
+ DCHECK(!chunks_[index]);
+ chunks_[index] = chunk.release();
+ recyclable_chunks_queue_[queue_tail_] = index;
+ queue_tail_ = NextQueueIndex(queue_tail_);
+ }
+
+ bool IsFull() const override { return false; }
+
+ size_t Size() const override {
+ // This is approximate because not all of the chunks are full.
+ return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ size_t Capacity() const override {
+ return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ if (handle.chunk_index >= chunks_.size())
+ return NULL;
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ if (!chunk || chunk->seq() != handle.chunk_seq)
+ return NULL;
+ return chunk->GetEventAt(handle.event_index);
+ }
+
+ const TraceBufferChunk* NextChunk() override {
+ if (chunks_.empty())
+ return NULL;
+
+ while (current_iteration_index_ != queue_tail_) {
+ size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
+ current_iteration_index_ = NextQueueIndex(current_iteration_index_);
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ DCHECK(chunks_[chunk_index]);
+ return chunks_[chunk_index];
+ }
+ return NULL;
+ }
+
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
+ for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+ queue_index = NextQueueIndex(queue_index)) {
+ size_t chunk_index = recyclable_chunks_queue_[queue_index];
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ TraceBufferChunk* chunk = chunks_[chunk_index];
+ cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
+ }
+ return cloned_buffer.Pass();
+ }
+
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ overhead->Add("TraceBufferRingBuffer", sizeof(*this));
+ for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+ queue_index = NextQueueIndex(queue_index)) {
+ size_t chunk_index = recyclable_chunks_queue_[queue_index];
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
+ }
+ }
+
+ private:
+ class ClonedTraceBuffer : public TraceBuffer {
+ public:
+ ClonedTraceBuffer() : current_iteration_index_(0) {}
+
+ // The only implemented method.
+ const TraceBufferChunk* NextChunk() override {
+ return current_iteration_index_ < chunks_.size()
+ ? chunks_[current_iteration_index_++]
+ : NULL;
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBufferChunk>();
+ }
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
+ NOTIMPLEMENTED();
+ }
+ bool IsFull() const override { return false; }
+ size_t Size() const override { return 0; }
+ size_t Capacity() const override { return 0; }
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ return NULL;
+ }
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBuffer>();
+ }
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ NOTIMPLEMENTED();
+ }
+
+ size_t current_iteration_index_;
+ ScopedVector<TraceBufferChunk> chunks_;
+ };
+
+ bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
+
+ size_t QueueSize() const {
+ return queue_tail_ > queue_head_
+ ? queue_tail_ - queue_head_
+ : queue_tail_ + queue_capacity() - queue_head_;
+ }
+
+ bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }
+
+ size_t queue_capacity() const {
+ // One extra space to help distinguish full state and empty state.
+ return max_chunks_ + 1;
+ }
+
+ size_t NextQueueIndex(size_t index) const {
+ index++;
+ if (index >= queue_capacity())
+ index = 0;
+ return index;
+ }
+
+ size_t max_chunks_;
+ ScopedVector<TraceBufferChunk> chunks_;
+
+ scoped_ptr<size_t[]> recyclable_chunks_queue_;
+ size_t queue_head_;
+ size_t queue_tail_;
+
+ size_t current_iteration_index_;
+ uint32 current_chunk_seq_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
+};
+
+class TraceBufferVector : public TraceBuffer {
+ public:
+ TraceBufferVector(size_t max_chunks)
+ : in_flight_chunk_count_(0),
+ current_iteration_index_(0),
+ max_chunks_(max_chunks) {
+ chunks_.reserve(max_chunks_);
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ // This function may be called when adding normal events or indirectly from
+ // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
+ // have to add the metadata events and flush thread-local buffers even if
+ // the buffer is full.
+ *index = chunks_.size();
+ chunks_.push_back(NULL); // Put NULL in the slot of a in-flight chunk.
+ ++in_flight_chunk_count_;
+ // + 1 because zero chunk_seq is not allowed.
+ return scoped_ptr<TraceBufferChunk>(
+ new TraceBufferChunk(static_cast<uint32>(*index) + 1));
+ }
+
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+ DCHECK_GT(in_flight_chunk_count_, 0u);
+ DCHECK_LT(index, chunks_.size());
+ DCHECK(!chunks_[index]);
+ --in_flight_chunk_count_;
+ chunks_[index] = chunk.release();
+ }
+
+ bool IsFull() const override { return chunks_.size() >= max_chunks_; }
+
+ size_t Size() const override {
+ // This is approximate because not all of the chunks are full.
+ return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ size_t Capacity() const override {
+ return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ if (handle.chunk_index >= chunks_.size())
+ return NULL;
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ if (!chunk || chunk->seq() != handle.chunk_seq)
+ return NULL;
+ return chunk->GetEventAt(handle.event_index);
+ }
+
+ const TraceBufferChunk* NextChunk() override {
+ while (current_iteration_index_ < chunks_.size()) {
+ // Skip in-flight chunks.
+ const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
+ if (chunk)
+ return chunk;
+ }
+ return NULL;
+ }
+
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBuffer>();
+ }
+
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ const size_t chunks_ptr_vector_allocated_size =
+ sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
+ const size_t chunks_ptr_vector_resident_size =
+ sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
+ overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
+ chunks_ptr_vector_resident_size);
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ TraceBufferChunk* chunk = chunks_[i];
+ // Skip the in-flight (nullptr) chunks. They will be accounted by the
+ // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
+ if (chunk)
+ chunk->EstimateTraceMemoryOverhead(overhead);
+ }
+ }
+
+ private:
+ size_t in_flight_chunk_count_;
+ size_t current_iteration_index_;
+ size_t max_chunks_;
+ ScopedVector<TraceBufferChunk> chunks_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
+};
+
+} // namespace
+
+TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {}
+
+TraceBufferChunk::~TraceBufferChunk() {}
+
+void TraceBufferChunk::Reset(uint32 new_seq) {
+ for (size_t i = 0; i < next_free_; ++i)
+ chunk_[i].Reset();
+ next_free_ = 0;
+ seq_ = new_seq;
+ cached_overhead_estimate_.reset();
+}
+
+TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
+ DCHECK(!IsFull());
+ *event_index = next_free_++;
+ return &chunk_[*event_index];
+}
+
+scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
+ scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
+ cloned_chunk->next_free_ = next_free_;
+ for (size_t i = 0; i < next_free_; ++i)
+ cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
+ return cloned_chunk.Pass();
+}
+
+void TraceBufferChunk::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ if (!cached_overhead_estimate_) {
+ cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
+
+ // When estimating the size of TraceBufferChunk, exclude the array of trace
+ // events, as they are computed individually below.
+ cached_overhead_estimate_->Add("TraceBufferChunk",
+ sizeof(*this) - sizeof(chunk_));
+ }
+
+ const size_t num_cached_estimated_events =
+ cached_overhead_estimate_->GetCount("TraceEvent");
+ DCHECK_LE(num_cached_estimated_events, size());
+
+ if (IsFull() && num_cached_estimated_events == size()) {
+ overhead->Update(*cached_overhead_estimate_);
+ return;
+ }
+
+ for (size_t i = num_cached_estimated_events; i < size(); ++i)
+ chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
+
+ if (IsFull()) {
+ cached_overhead_estimate_->AddSelf();
+ } else {
+ // The unused TraceEvents in |chunks_| are not cached. They will keep
+ // changing as new TraceEvents are added to this chunk, so they are
+ // computed on the fly.
+ const size_t num_unused_trace_events = capacity() - size();
+ overhead->Add("TraceEvent (unused)",
+ num_unused_trace_events * sizeof(TraceEvent));
+ }
+
+ overhead->Update(*cached_overhead_estimate_);
+}
+
+TraceResultBuffer::OutputCallback
+TraceResultBuffer::SimpleOutput::GetCallback() {
+ return Bind(&SimpleOutput::Append, Unretained(this));
+}
+
+void TraceResultBuffer::SimpleOutput::Append(
+ const std::string& json_trace_output) {
+ json_output += json_trace_output;
+}
+
+TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}
+
+TraceResultBuffer::~TraceResultBuffer() {}
+
+void TraceResultBuffer::SetOutputCallback(
+ const OutputCallback& json_chunk_callback) {
+ output_callback_ = json_chunk_callback;
+}
+
+void TraceResultBuffer::Start() {
+ append_comma_ = false;
+ output_callback_.Run("[");
+}
+
+void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
+ if (append_comma_)
+ output_callback_.Run(",");
+ append_comma_ = true;
+ output_callback_.Run(trace_fragment);
+}
+
+void TraceResultBuffer::Finish() {
+ output_callback_.Run("]");
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
+ return new TraceBufferRingBuffer(max_chunks);
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
+ return new TraceBufferVector(max_chunks);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_buffer.h b/chromium/base/trace_event/trace_buffer.h
new file mode 100644
index 00000000000..d54bd74bec5
--- /dev/null
+++ b/chromium/base/trace_event/trace_buffer.h
@@ -0,0 +1,130 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_BUFFER_H_
+#define BASE_TRACE_EVENT_TRACE_BUFFER_H_
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+namespace trace_event {
+
+// TraceBufferChunk is the basic unit of TraceBuffer.
+class BASE_EXPORT TraceBufferChunk {
+ public:
+ explicit TraceBufferChunk(uint32 seq);
+ ~TraceBufferChunk();
+
+ void Reset(uint32 new_seq);
+ TraceEvent* AddTraceEvent(size_t* event_index);
+ bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
+
+ uint32 seq() const { return seq_; }
+ size_t capacity() const { return kTraceBufferChunkSize; }
+ size_t size() const { return next_free_; }
+
+ TraceEvent* GetEventAt(size_t index) {
+ DCHECK(index < size());
+ return &chunk_[index];
+ }
+ const TraceEvent* GetEventAt(size_t index) const {
+ DCHECK(index < size());
+ return &chunk_[index];
+ }
+
+ scoped_ptr<TraceBufferChunk> Clone() const;
+
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+ // These values must be kept consistent with the numbers of bits of
+ // chunk_index and event_index fields in TraceEventHandle
+ // (in trace_event_impl.h).
+ static const size_t kMaxChunkIndex = (1u << 26) - 1;
+ static const size_t kTraceBufferChunkSize = 64;
+
+ private:
+ size_t next_free_;
+ scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
+ TraceEvent chunk_[kTraceBufferChunkSize];
+ uint32 seq_;
+};
+
+// TraceBuffer holds the events as they are collected.
+class BASE_EXPORT TraceBuffer {
+ public:
+ virtual ~TraceBuffer() {}
+
+ virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+ virtual void ReturnChunk(size_t index,
+ scoped_ptr<TraceBufferChunk> chunk) = 0;
+
+ virtual bool IsFull() const = 0;
+ virtual size_t Size() const = 0;
+ virtual size_t Capacity() const = 0;
+ virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
+
+ // For iteration. Each TraceBuffer can only be iterated once.
+ virtual const TraceBufferChunk* NextChunk() = 0;
+
+ virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
+
+ // Computes an estimate of the size of the buffer, including all the retained
+ // objects.
+ virtual void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) = 0;
+
+ static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks);
+ static TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
+};
+
+// TraceResultBuffer collects and converts trace fragments returned by TraceLog
+// to JSON output.
+class BASE_EXPORT TraceResultBuffer {
+ public:
+ typedef base::Callback<void(const std::string&)> OutputCallback;
+
+ // If you don't need to stream JSON chunks out efficiently, and just want to
+ // get a complete JSON string after calling Finish, use this struct to collect
+ // JSON trace output.
+ struct BASE_EXPORT SimpleOutput {
+ OutputCallback GetCallback();
+ void Append(const std::string& json_string);
+
+ // Do what you want with the json_output_ string after calling
+ // TraceResultBuffer::Finish.
+ std::string json_output;
+ };
+
+ TraceResultBuffer();
+ ~TraceResultBuffer();
+
+ // Set callback. The callback will be called during Start with the initial
+ // JSON output and during AddFragment and Finish with following JSON output
+ // chunks. The callback target must live past the last calls to
+ // TraceResultBuffer::Start/AddFragment/Finish.
+ void SetOutputCallback(const OutputCallback& json_chunk_callback);
+
+ // Start JSON output. This resets all internal state, so you can reuse
+ // the TraceResultBuffer by calling Start.
+ void Start();
+
+ // Call AddFragment 0 or more times to add trace fragments from TraceLog.
+ void AddFragment(const std::string& trace_fragment);
+
+ // When all fragments have been added, call Finish to complete the JSON
+ // formatted output.
+ void Finish();
+
+ private:
+ OutputCallback output_callback_;
+ bool append_comma_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_BUFFER_H_
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 2a15ec578fc..9f0367bd2a2 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -10,6 +10,8 @@
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event.h"
namespace base {
@@ -37,6 +39,21 @@ const char kSyntheticDelaysParam[] = "synthetic_delays";
const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
+// String parameters that is used to parse memory dump config in trace config
+// string.
+const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kTriggersParam[] = "triggers";
+const char kPeriodicIntervalParam[] = "periodic_interval_ms";
+const char kModeParam[] = "mode";
+
+// Default configuration of memory dumps.
+const TraceConfig::MemoryDumpTriggerConfig kDefaultHeavyMemoryDumpTrigger = {
+ 2000, // periodic_interval_ms
+ MemoryDumpLevelOfDetail::DETAILED};
+const TraceConfig::MemoryDumpTriggerConfig kDefaultLightMemoryDumpTrigger = {
+ 250, // periodic_interval_ms
+ MemoryDumpLevelOfDetail::LIGHT};
+
} // namespace
TraceConfig::TraceConfig() {
@@ -82,11 +99,11 @@ TraceConfig::TraceConfig(const TraceConfig& tc)
enable_sampling_(tc.enable_sampling_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
+ memory_dump_config_(tc.memory_dump_config_),
included_categories_(tc.included_categories_),
disabled_categories_(tc.disabled_categories_),
excluded_categories_(tc.excluded_categories_),
- synthetic_delays_(tc.synthetic_delays_) {
-}
+ synthetic_delays_(tc.synthetic_delays_) {}
TraceConfig::~TraceConfig() {
}
@@ -99,6 +116,7 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
enable_sampling_ = rhs.enable_sampling_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
+ memory_dump_config_ = rhs.memory_dump_config_;
included_categories_ = rhs.included_categories_;
disabled_categories_ = rhs.disabled_categories_;
excluded_categories_ = rhs.excluded_categories_;
@@ -207,6 +225,10 @@ void TraceConfig::Merge(const TraceConfig& config) {
included_categories_.clear();
}
+ memory_dump_config_.insert(memory_dump_config_.end(),
+ config.memory_dump_config_.begin(),
+ config.memory_dump_config_.end());
+
disabled_categories_.insert(disabled_categories_.end(),
config.disabled_categories_.begin(),
config.disabled_categories_.end());
@@ -227,6 +249,7 @@ void TraceConfig::Clear() {
disabled_categories_.clear();
excluded_categories_.clear();
synthetic_delays_.clear();
+ memory_dump_config_.clear();
}
void TraceConfig::InitializeDefault() {
@@ -279,23 +302,33 @@ void TraceConfig::InitializeFromConfigString(const std::string& config_string) {
else
enable_argument_filter_ = enable_argument_filter;
-
- base::ListValue* category_list = NULL;
+ base::ListValue* category_list = nullptr;
if (dict->GetList(kIncludedCategoriesParam, &category_list))
SetCategoriesFromIncludedList(*category_list);
if (dict->GetList(kExcludedCategoriesParam, &category_list))
SetCategoriesFromExcludedList(*category_list);
if (dict->GetList(kSyntheticDelaysParam, &category_list))
SetSyntheticDelaysFromList(*category_list);
+
+ if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ // If dump triggers not set, the client is using the legacy with just
+ // category enabled. So, use the default periodic dump config.
+ base::DictionaryValue* memory_dump_config = nullptr;
+ if (dict->GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
+ SetMemoryDumpConfig(*memory_dump_config);
+ else
+ SetDefaultMemoryDumpConfig();
+ }
}
void TraceConfig::InitializeFromStrings(
const std::string& category_filter_string,
const std::string& trace_options_string) {
if (!category_filter_string.empty()) {
- std::vector<std::string> split;
+ std::vector<std::string> split = base::SplitString(
+ category_filter_string, ",", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL);
std::vector<std::string>::iterator iter;
- base::SplitString(category_filter_string, ',', &split);
for (iter = split.begin(); iter != split.end(); ++iter) {
std::string category = *iter;
// Ignore empty categories.
@@ -331,9 +364,9 @@ void TraceConfig::InitializeFromStrings(
enable_systrace_ = false;
enable_argument_filter_ = false;
if(!trace_options_string.empty()) {
- std::vector<std::string> split;
+ std::vector<std::string> split = base::SplitString(
+ trace_options_string, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::vector<std::string>::iterator iter;
- base::SplitString(trace_options_string, ',', &split);
for (iter = split.begin(); iter != split.end(); ++iter) {
if (*iter == kRecordUntilFull) {
record_mode_ = RECORD_UNTIL_FULL;
@@ -352,6 +385,10 @@ void TraceConfig::InitializeFromStrings(
}
}
}
+
+ if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ SetDefaultMemoryDumpConfig();
+ }
}
void TraceConfig::SetCategoriesFromIncludedList(
@@ -411,6 +448,43 @@ void TraceConfig::AddCategoryToDict(base::DictionaryValue& dict,
dict.Set(param, list.Pass());
}
+void TraceConfig::SetMemoryDumpConfig(
+ const base::DictionaryValue& memory_dump_config) {
+ memory_dump_config_.clear();
+
+ const base::ListValue* trigger_list = nullptr;
+ if (!memory_dump_config.GetList(kTriggersParam, &trigger_list) ||
+ trigger_list->GetSize() == 0) {
+ return;
+ }
+
+ for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
+ const base::DictionaryValue* trigger = nullptr;
+ if (!trigger_list->GetDictionary(i, &trigger))
+ continue;
+
+ MemoryDumpTriggerConfig dump_config;
+ int interval = 0;
+
+ if (!trigger->GetInteger(kPeriodicIntervalParam, &interval)) {
+ continue;
+ }
+ DCHECK_GT(interval, 0);
+ dump_config.periodic_interval_ms = static_cast<uint32>(interval);
+ std::string level_of_detail_str;
+ trigger->GetString(kModeParam, &level_of_detail_str);
+ dump_config.level_of_detail =
+ StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+ memory_dump_config_.push_back(dump_config);
+ }
+}
+
+void TraceConfig::SetDefaultMemoryDumpConfig() {
+ memory_dump_config_.clear();
+ memory_dump_config_.push_back(kDefaultHeavyMemoryDumpTrigger);
+ memory_dump_config_.push_back(kDefaultLightMemoryDumpTrigger);
+}
+
void TraceConfig::ToDict(base::DictionaryValue& dict) const {
switch (record_mode_) {
case RECORD_UNTIL_FULL:
@@ -451,6 +525,26 @@ void TraceConfig::ToDict(base::DictionaryValue& dict) const {
AddCategoryToDict(dict, kIncludedCategoriesParam, categories);
AddCategoryToDict(dict, kExcludedCategoriesParam, excluded_categories_);
AddCategoryToDict(dict, kSyntheticDelaysParam, synthetic_delays_);
+
+ if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
+ scoped_ptr<base::DictionaryValue> memory_dump_config(
+ new base::DictionaryValue());
+ scoped_ptr<base::ListValue> triggers_list(new base::ListValue());
+ for (const MemoryDumpTriggerConfig& config : memory_dump_config_) {
+ scoped_ptr<base::DictionaryValue> trigger_dict(
+ new base::DictionaryValue());
+ trigger_dict->SetInteger(kPeriodicIntervalParam,
+ static_cast<int>(config.periodic_interval_ms));
+ trigger_dict->SetString(
+ kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
+ triggers_list->Append(trigger_dict.Pass());
+ }
+
+ // Empty triggers will still be specified explicitly since it means that
+ // the periodic dumps are not enabled.
+ memory_dump_config->Set(kTriggersParam, triggers_list.Pass());
+ dict.Set(kMemoryDumpConfigParam, memory_dump_config.Pass());
+ }
}
std::string TraceConfig::ToTraceOptionsString() const {
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index a9f83065620..44cf16df878 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,6 +10,7 @@
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
+#include "base/trace_event/memory_dump_request_args.h"
#include "base/values.h"
namespace base {
@@ -35,6 +36,15 @@ class BASE_EXPORT TraceConfig {
public:
typedef std::vector<std::string> StringList;
+ // Specifies the memory dump config for tracing. Used only when
+ // "memory-infra" category is enabled.
+ struct MemoryDumpTriggerConfig {
+ uint32 periodic_interval_ms;
+ MemoryDumpLevelOfDetail level_of_detail;
+ };
+
+ typedef std::vector<MemoryDumpTriggerConfig> MemoryDumpConfig;
+
TraceConfig();
// Create TraceConfig object from category filter and trace options strings.
@@ -99,10 +109,21 @@ class BASE_EXPORT TraceConfig {
// "enable_argument_filter": true,
// "included_categories": ["included",
// "inc_pattern*",
- // "disabled-by-default-category1"],
+ // "disabled-by-default-memory-infra"],
// "excluded_categories": ["excluded", "exc_pattern*"],
// "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"]
+ // "memory_dump_config": {
+ // "triggers": [
+ // {
+ // "mode": "detailed",
+ // "periodic_interval_ms": 2000
+ // }
+ // ]
+ // }
// }
+ //
+ // Note: memory_dump_config can be specified only if
+ // disabled-by-default-memory-infra category is enabled.
explicit TraceConfig(const std::string& config_string);
TraceConfig(const TraceConfig& tc);
@@ -140,6 +161,10 @@ class BASE_EXPORT TraceConfig {
void Clear();
+ const MemoryDumpConfig& memory_dump_config() const {
+ return memory_dump_config_;
+ }
+
private:
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -149,6 +174,9 @@ class BASE_EXPORT TraceConfig {
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
IsEmptyOrContainsLeadingOrTrailingWhitespace);
+ FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
+ FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
+ FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
@@ -169,6 +197,9 @@ class BASE_EXPORT TraceConfig {
const char* param,
const StringList& categories) const;
+ void SetMemoryDumpConfig(const base::DictionaryValue& memory_dump_config);
+ void SetDefaultMemoryDumpConfig();
+
// Convert TraceConfig to the dict representation of the TraceConfig.
void ToDict(base::DictionaryValue& dict) const;
@@ -193,6 +224,8 @@ class BASE_EXPORT TraceConfig {
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
+ MemoryDumpConfig memory_dump_config_;
+
StringList included_categories_;
StringList disabled_categories_;
StringList excluded_categories_;
diff --git a/chromium/base/trace_event/trace_config_memory_test_util.h b/chromium/base/trace_event/trace_config_memory_test_util.h
new file mode 100644
index 00000000000..8d8206fd7a8
--- /dev/null
+++ b/chromium/base/trace_event/trace_config_memory_test_util.h
@@ -0,0 +1,76 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+#define BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
+
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_manager.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceConfigMemoryTestUtil {
+ public:
+ static std::string GetTraceConfig_PeriodicTriggers(int light_period,
+ int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"light\","
+ "\"periodic_interval_ms\":%d"
+ "},"
+ "{"
+ "\"mode\":\"detailed\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}", MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ }
+
+ static std::string GetTraceConfig_EmptyTriggers() {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"triggers\":["
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}", MemoryDumpManager::kTraceCategory);
+ }
+
+ static std::string GetTraceConfig_NoTriggers() {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_sampling\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"record_mode\":\"record-until-full\""
+ "}", MemoryDumpManager::kTraceCategory);
+ }
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index a2a3703ea3a..84da739b547 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -1,8 +1,10 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_config_memory_test_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -18,7 +20,6 @@ const char kDefaultTraceConfigString[] =
"\"excluded_categories\":[\"*Debug\",\"*Test\"],"
"\"record_mode\":\"record-until-full\""
"}";
-
} // namespace
TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -488,5 +489,37 @@ TEST(TraceConfigTest, SetTraceOptionValues) {
EXPECT_TRUE(tc.IsSystraceEnabled());
}
+TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
+ std::string tc_str =
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
+ TraceConfig tc(tc_str);
+ EXPECT_EQ(tc_str, tc.ToString());
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ EXPECT_EQ(2u, tc.memory_dump_config_.size());
+
+ EXPECT_EQ(200u, tc.memory_dump_config_[0].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
+ tc.memory_dump_config_[0].level_of_detail);
+
+ EXPECT_EQ(2000u, tc.memory_dump_config_[1].periodic_interval_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+ tc.memory_dump_config_[1].level_of_detail);
+}
+
+TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
+ // Empty trigger list should also be specified when converting back to string.
+ TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
+ EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
+ tc.ToString());
+ EXPECT_EQ(0u, tc.memory_dump_config_.size());
+}
+
+TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
+ TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
+ EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
+ EXPECT_EQ(2u, tc.memory_dump_config_.size());
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/trace_event.gypi b/chromium/base/trace_event/trace_event.gypi
index 05ec283d64d..d7ec3915e3f 100644
--- a/chromium/base/trace_event/trace_event.gypi
+++ b/chromium/base/trace_event/trace_event.gypi
@@ -17,6 +17,8 @@
'trace_event/memory_dump_request_args.h',
'trace_event/memory_dump_session_state.cc',
'trace_event/memory_dump_session_state.h',
+ 'trace_event/memory_profiler_allocation_context.cc',
+ 'trace_event/memory_profiler_allocation_context.h',
'trace_event/process_memory_dump.cc',
'trace_event/process_memory_dump.h',
'trace_event/process_memory_maps.cc',
@@ -27,17 +29,19 @@
'trace_event/process_memory_totals.h',
'trace_event/process_memory_totals_dump_provider.cc',
'trace_event/process_memory_totals_dump_provider.h',
+ 'trace_event/trace_buffer.cc',
+ 'trace_event/trace_buffer.h',
'trace_event/trace_config.cc',
'trace_event/trace_config.h',
'trace_event/trace_event.h',
'trace_event/trace_event_android.cc',
'trace_event/trace_event_argument.cc',
'trace_event/trace_event_argument.h',
+ 'trace_event/trace_event_common.h',
'trace_event/trace_event_etw_export_win.cc',
'trace_event/trace_event_etw_export_win.h',
'trace_event/trace_event_impl.cc',
'trace_event/trace_event_impl.h',
- 'trace_event/trace_event_impl_constants.cc',
'trace_event/trace_event_memory.cc',
'trace_event/trace_event_memory.h',
'trace_event/trace_event_memory_overhead.cc',
@@ -48,6 +52,11 @@
'trace_event/trace_event_system_stats_monitor.h',
'trace_event/trace_event_win.cc',
'trace_event/trace_event_win.h',
+ 'trace_event/trace_log.cc',
+ 'trace_event/trace_log.h',
+ 'trace_event/trace_log_constants.cc',
+ 'trace_event/trace_sampling_thread.cc',
+ 'trace_event/trace_sampling_thread.h',
'trace_event/winheap_dump_provider_win.cc',
'trace_event/winheap_dump_provider_win.h',
],
@@ -63,9 +72,11 @@
'trace_event/java_heap_dump_provider_android_unittest.cc',
'trace_event/memory_allocator_dump_unittest.cc',
'trace_event/memory_dump_manager_unittest.cc',
+ 'trace_event/memory_profiler_allocation_context_unittest.cc',
'trace_event/process_memory_dump_unittest.cc',
'trace_event/process_memory_maps_dump_provider_unittest.cc',
'trace_event/process_memory_totals_dump_provider_unittest.cc',
+ 'trace_event/trace_config_memory_test_util.h',
'trace_event/trace_config_unittest.cc',
'trace_event/trace_event_argument_unittest.cc',
'trace_event/trace_event_memory_unittest.cc',
diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h
index 07018b93e07..c3b20fc58d0 100644
--- a/chromium/base/trace_event/trace_event.h
+++ b/chromium/base/trace_event/trace_event.h
@@ -2,200 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This header file defines the set of trace_event macros without specifying
-// how the events actually get collected and stored. If you need to expose trace
-// events to some other universe, you can copy-and-paste this file as well as
-// trace_event.h, modifying the macros contained there as necessary for the
-// target platform. The end result is that multiple libraries can funnel events
-// through to a shared trace event collector.
-
-// Trace events are for tracking application performance and resource usage.
-// Macros are provided to track:
-// Begin and end of function calls
-// Counters
-//
-// Events are issued against categories. Whereas LOG's
-// categories are statically defined, TRACE categories are created
-// implicitly with a string. For example:
-// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
-// TRACE_EVENT_SCOPE_THREAD)
-//
-// It is often the case that one trace may belong in multiple categories at the
-// same time. The first argument to the trace can be a comma-separated list of
-// categories, forming a category group, like:
-//
-// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
-//
-// We can enable/disable tracing of OnMouseOver by enabling/disabling either
-// category.
-//
-// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
-// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
-// doSomethingCostly()
-// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
-// Note: our tools can't always determine the correct BEGIN/END pairs unless
-// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
-// need them to be in separate scopes.
-//
-// A common use case is to trace entire function scopes. This
-// issues a trace BEGIN and END automatically:
-// void doSomethingCostly() {
-// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
-// ...
-// }
-//
-// Additional parameters can be associated with an event:
-// void doSomethingCostly2(int howMuch) {
-// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
-// "howMuch", howMuch);
-// ...
-// }
-//
-// The trace system will automatically add to this information the
-// current process id, thread id, and a timestamp in microseconds.
-//
-// To trace an asynchronous procedure such as an IPC send/receive, use
-// ASYNC_BEGIN and ASYNC_END:
-// [single threaded sender code]
-// static int send_count = 0;
-// ++send_count;
-// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
-// Send(new MyMessage(send_count));
-// [receive code]
-// void OnMyMessage(send_count) {
-// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
-// }
-// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
-// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
-// Pointers can be used for the ID parameter, and they will be mangled
-// internally so that the same pointer on two different processes will not
-// match. For example:
-// class MyTracedClass {
-// public:
-// MyTracedClass() {
-// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
-// }
-// ~MyTracedClass() {
-// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
-// }
-// }
-//
-// Trace event also supports counters, which is a way to track a quantity
-// as it varies over time. Counters are created with the following macro:
-// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
-//
-// Counters are process-specific. The macro itself can be issued from any
-// thread, however.
-//
-// Sometimes, you want to track two counters at once. You can do this with two
-// counter macros:
-// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
-// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
-// Or you can do it with a combined macro:
-// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
-// "bytesPinned", g_myCounterValue[0],
-// "bytesAllocated", g_myCounterValue[1]);
-// This indicates to the tracing UI that these counters should be displayed
-// in a single graph, as a summed area chart.
-//
-// Since counters are in a global namespace, you may want to disambiguate with a
-// unique ID, by using the TRACE_COUNTER_ID* variations.
-//
-// By default, trace collection is compiled in, but turned off at runtime.
-// Collecting trace data is the responsibility of the embedding
-// application. In Chrome's case, navigating to about:tracing will turn on
-// tracing and display data collected across all active processes.
-//
-//
-// Memory scoping note:
-// Tracing copies the pointers, not the string content, of the strings passed
-// in for category_group, name, and arg_names. Thus, the following code will
-// cause problems:
-// char* str = strdup("importantName");
-// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
-// free(str); // Trace system now has dangling pointer
-//
-// To avoid this issue with the |name| and |arg_name| parameters, use the
-// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
-// Notes: The category must always be in a long-lived char* (i.e. static const).
-// The |arg_values|, when used, are always deep copied with the _COPY
-// macros.
-//
-// When are string argument values copied:
-// const char* arg_values are only referenced by default:
-// TRACE_EVENT1("category", "name",
-// "arg1", "literal string is only referenced");
-// Use TRACE_STR_COPY to force copying of a const char*:
-// TRACE_EVENT1("category", "name",
-// "arg1", TRACE_STR_COPY("string will be copied"));
-// std::string arg_values are always copied:
-// TRACE_EVENT1("category", "name",
-// "arg1", std::string("string will be copied"));
-//
-//
-// Convertable notes:
-// Converting a large data type to a string can be costly. To help with this,
-// the trace framework provides an interface ConvertableToTraceFormat. If you
-// inherit from it and implement the AppendAsTraceFormat method the trace
-// framework will call back to your object to convert a trace output time. This
-// means, if the category for the event is disabled, the conversion will not
-// happen.
-//
-// class MyData : public base::trace_event::ConvertableToTraceFormat {
-// public:
-// MyData() {}
-// void AppendAsTraceFormat(std::string* out) const override {
-// out->append("{\"foo\":1}");
-// }
-// private:
-// ~MyData() override {}
-// DISALLOW_COPY_AND_ASSIGN(MyData);
-// };
-//
-// TRACE_EVENT1("foo", "bar", "data",
-// scoped_refptr<ConvertableToTraceFormat>(new MyData()));
-//
-// The trace framework will take ownership if the passed pointer and it will
-// be free'd when the trace buffer is flushed.
-//
-// Note, we only do the conversion when the buffer is flushed, so the provided
-// data object should not be modified after it's passed to the trace framework.
-//
-//
-// Thread Safety:
-// A thread safe singleton and mutex are used for thread safety. Category
-// enabled flags are used to limit the performance impact when the system
-// is not enabled.
-//
-// TRACE_EVENT macros first cache a pointer to a category. The categories are
-// statically allocated and safe at all times, even after exit. Fetching a
-// category is protected by the TraceLog::lock_. Multiple threads initializing
-// the static variable is safe, as they will be serialized by the lock and
-// multiple calls will return the same pointer to the category.
-//
-// Then the category_group_enabled flag is checked. This is a unsigned char, and
-// not intended to be multithread safe. It optimizes access to AddTraceEvent
-// which is threadsafe internally via TraceLog::lock_. The enabled flag may
-// cause some threads to incorrectly call or skip calling AddTraceEvent near
-// the time of the system being enabled or disabled. This is acceptable as
-// we tolerate some data loss while the system is being enabled/disabled and
-// because AddTraceEvent is threadsafe internally and checks the enabled state
-// again under lock.
-//
-// Without the use of these static category pointers and enabled flags all
-// trace points would carry a significant performance cost of acquiring a lock
-// and resolving the category.
-
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_H_
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_macros_common.h instead of here.
+
#include <string>
#include "base/atomicops.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_event_common.h"
#include "base/trace_event/trace_event_memory.h"
#include "base/trace_event/trace_event_system_stats_monitor.h"
+#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
// By default, const char* argument values are assumed to have long-lived scope
@@ -203,10 +24,6 @@
#define TRACE_STR_COPY(str) \
trace_event_internal::TraceStringWithCopy(str)
-// This will mark the trace event as disabled by default. The user will need
-// to explicitly enable the event.
-#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
-
// By default, uint64 ID argument values are not mangled with the Process ID in
// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
#define TRACE_ID_MANGLE(id) \
@@ -217,105 +34,6 @@
#define TRACE_ID_DONT_MANGLE(id) \
trace_event_internal::TraceID::DontMangle(id)
-// Records a pair of begin and end events called "name" for the current
-// scope, with 0, 1 or 2 associated arguments. If the category is not
-// enabled, then this does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT0(category_group, name) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
-#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
-#define TRACE_EVENT2( \
- category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED( \
- category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
-// Use this where |name| is too generic to accurately aggregate allocations.
-#define TRACE_EVENT_WITH_MEMORY_TAG2( \
- category, name, memory_tag, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_MEMORY(category, memory_tag) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED( \
- category, name, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
-// included in official builds.
-
-#if OFFICIAL_BUILD
-#undef TRACING_IS_OFFICIAL_BUILD
-#define TRACING_IS_OFFICIAL_BUILD 1
-#elif !defined(TRACING_IS_OFFICIAL_BUILD)
-#define TRACING_IS_OFFICIAL_BUILD 0
-#endif
-
-#if TRACING_IS_OFFICIAL_BUILD
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
- arg1_name, arg1_val) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
- arg1_name, arg1_val, \
- arg2_name, arg2_val) (void)0
-#else
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
- TRACE_EVENT0(category_group, name)
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
- TRACE_EVENT_INSTANT0(category_group, name, scope)
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
- arg1_name, arg1_val) \
- TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
- arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#endif
-
-// Records a single event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
- category_group, name, TRACE_EVENT_FLAG_NONE | scope)
-#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
- category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
- category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
- category_group, name, TRACE_EVENT_FLAG_COPY | scope)
-#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, \
- arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
- category_group, name, TRACE_EVENT_FLAG_COPY | scope, arg1_name, \
- arg1_val)
-#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, \
- arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
- category_group, name, TRACE_EVENT_FLAG_COPY | scope, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
// Sets the current sample state to the given category and name (both must be
// constant strings). These states are intended for a sampling profiler.
// Implementation note: we store category and name together because we don't
@@ -342,563 +60,8 @@
trace_event_internal::TraceEventSamplingStateScope<bucket_number> \
traceEventSamplingScope(category "\0" name);
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() \
- TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-
-
-// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT_BEGIN0(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
- category_group, name, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
- category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
- category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
- category_group, name, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
- category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
- category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-
-// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
-// - |id| is used to match the _BEGIN event with the _END event.
-// Events are considered to match if their category_group, name and id values
-// all match. |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
- name, id, thread_id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
- category_group, name, id, thread_id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
- category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \
- category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// Records a single END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_EVENT_END0(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
- category_group, name, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
- category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
- category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_END0(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
- category_group, name, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
- category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
- category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-
-// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
-// - |id| is used to match the _BEGIN event with the _END event.
-// Events are considered to match if their category_group, name and id values
-// all match. |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
- name, id, thread_id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
- category_group, name, id, thread_id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
- category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \
- category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
- timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
- arg2_val)
-
-// Records the value of a counter called "name" immediately. Value
-// must be representable as a 32 bit integer.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_COUNTER1(category_group, name, value) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, TRACE_EVENT_FLAG_NONE, \
- "value", static_cast<int>(value))
-#define TRACE_COPY_COUNTER1(category_group, name, value) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, TRACE_EVENT_FLAG_COPY, \
- "value", static_cast<int>(value))
-
-// Records the values of a multi-parted counter called "name" immediately.
-// The UI will treat value1 and value2 as parts of a whole, displaying their
-// values as a stacked-bar chart.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
- value2_name, value2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, TRACE_EVENT_FLAG_NONE, \
- value1_name, static_cast<int>(value1_val), \
- value2_name, static_cast<int>(value2_val))
-#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
- value2_name, value2_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, TRACE_EVENT_FLAG_COPY, \
- value1_name, static_cast<int>(value1_val), \
- value2_name, static_cast<int>(value2_val))
-
-// Records the value of a counter called "name" immediately. Value
-// must be representable as a 32 bit integer.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to disambiguate counters with the same name. It must either
-// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
-// will be xored with a hash of the process ID so that the same pointer on
-// two different processes will not collide.
-#define TRACE_COUNTER_ID1(category_group, name, id, value) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, \
- "value", static_cast<int>(value))
-#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- "value", static_cast<int>(value))
-
-// Records the values of a multi-parted counter called "name" immediately.
-// The UI will treat value1 and value2 as parts of a whole, displaying their
-// values as a stacked-bar chart.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to disambiguate counters with the same name. It must either
-// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
-// will be xored with a hash of the process ID so that the same pointer on
-// two different processes will not collide.
-#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
- value2_name, value2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, \
- value1_name, static_cast<int>(value1_val), \
- value2_name, static_cast<int>(value2_val))
-#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
- value1_val, value2_name, value2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- value1_name, static_cast<int>(value1_val), \
- value2_name, static_cast<int>(value2_val))
-
-// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
-#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
- thread_id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
- TRACE_EVENT_FLAG_NONE)
-
-#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
- category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
- TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-
-#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
- thread_id, timestamp, \
- arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
- TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
-
-// ASYNC_STEP_* APIs should be only used by legacy code. New code should
-// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
-// event.
-// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
-// events are considered to match if their category_group, name and id values
-// all match. |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-//
-// An asynchronous operation can consist of multiple phases. The first phase is
-// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
-// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
-// annotate the block following the call. The ASYNC_STEP_PAST macro will
-// annotate the block prior to the call. Note that any particular event must use
-// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
-// operation completes, call ASYNC_END.
-//
-// An ASYNC trace typically occurs on a single thread (if not, they will only be
-// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
-// operation must use the same |name| and |id|. Each step can have its own
-// args.
-#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
-// provided.
-#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, \
- name, id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
- static_cast<int>(base::PlatformThread::CurrentId()), \
- timestamp, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, \
- name, id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
- static_cast<int>(base::PlatformThread::CurrentId()), \
- timestamp, TRACE_EVENT_FLAG_COPY)
-
-// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
-// category is not enabled, then this does nothing. The |name| and |id| must
-// match the ASYNC_BEGIN event above. The |step| param identifies this step
-// within the async event. This should be called at the beginning of the next
-// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
-// ASYNC_STEP_PAST events.
-#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
-#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
- arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
- arg1_name, arg1_val)
-
-// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |at| timestamp
-// provided.
-#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, \
- id, step, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
- static_cast<int>(base::PlatformThread::CurrentId()), \
- timestamp, TRACE_EVENT_FLAG_NONE, "step", step)
-
-// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
-// category is not enabled, then this does nothing. The |name| and |id| must
-// match the ASYNC_BEGIN event above. The |step| param identifies this step
-// within the async event. This should be called at the beginning of the next
-// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
-// ASYNC_STEP_INTO events.
-#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
-#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
- arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
- arg1_name, arg1_val)
-
-// Records a single ASYNC_END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
-#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, \
- name, id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
- static_cast<int>(base::PlatformThread::CurrentId()), \
- timestamp, TRACE_EVENT_FLAG_NONE)
-
-// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
-// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
-// events.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
-// considered as a match if their category_group, name and id all match.
-// - |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-// - |id| is used to match a child NESTABLE_ASYNC event with its parent
-// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
-// be logged using the same id and category_group.
-//
-// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
-// at the first NESTABLE_ASYNC event of that id, and unmatched
-// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
-// NESTABLE_ASYNC event of that id. Corresponding warning messages for
-// unmatched events will be shown in the analysis view.
-
-// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
-// 0, 1 or 2 associated arguments. If the category is not enabled, then this
-// does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
-// or 2 associated arguments. If the category is not enabled, then this does
-// nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-
-#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(category_group, name, \
- id, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(category_group, name, \
- id, arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
- category_group, name, id, \
- TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
-// |timestamp| provided.
-#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
- id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
- static_cast<int>(base::PlatformThread::CurrentId()), timestamp, \
- TRACE_EVENT_FLAG_NONE)
-
-#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
- id, timestamp) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
- static_cast<int>(base::PlatformThread::CurrentId()), timestamp, \
- TRACE_EVENT_FLAG_NONE)
-
-// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
-// with 2 associated arguments. If the category is not enabled, then this
-// does nothing.
-#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(category_group, name, id, \
- arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-
-// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
-// associated arguments. If the category is not enabled, then this
-// does nothing.
-// - category and name strings must have application lifetime (statics or
-// literals). They may not include " chars.
-// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
-// events are considered to match if their category_group, name and id values
-// all match. |id| must either be a pointer or an integer value up to 64 bits.
-// If it's a pointer, the bits will be xored with a hash of the process ID so
-// that the same pointer on two different processes will not collide.
-// FLOW events are different from ASYNC events in how they are drawn by the
-// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
-// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
-// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
-// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
-// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
-// macros. When the operation completes, call FLOW_END. An async operation can
-// span threads and processes, but all events in that operation must use the
-// same |name| and |id|. Each event can have its own args.
-#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Records a single FLOW_STEP event for |step| immediately. If the category
-// is not enabled, then this does nothing. The |name| and |id| must match the
-// FLOW_BEGIN event above. The |step| param identifies this step within the
-// async event. This should be called at the beginning of the next phase of an
-// asynchronous operation.
-#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
-#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, \
- arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
-#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, \
- arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
- arg1_name, arg1_val)
-
-// Records a single FLOW_END event for "name" immediately. If the category
-// is not enabled, then this does nothing.
-#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE)
-#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
-#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
-#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_NONE, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
- arg1_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val)
-#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
- category_group, name, id, TRACE_EVENT_FLAG_COPY, \
- arg1_name, arg1_val, arg2_name, arg2_val)
-
-// Macros to track the life time and value of arbitrary client objects.
-// See also TraceTrackableObject.
-#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT, \
- category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
-
-#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, \
- category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE,\
- "snapshot", snapshot)
-
-#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
- category_group, name, id, timestamp, snapshot) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
- TRACE_ID_DONT_MANGLE(id), \
- static_cast<int>(base::PlatformThread::CurrentId()), timestamp, \
- TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
-
-#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
- INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT, \
- category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_API_CURRENT_THREAD_ID \
+ static_cast<int>(base::PlatformThread::CurrentId())
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
@@ -906,33 +69,6 @@
base::trace_event::TraceLog::ENABLED_FOR_EVENT_CALLBACK | \
base::trace_event::TraceLog::ENABLED_FOR_ETW_EXPORT))
-// Macro to efficiently determine if a given category group is enabled.
-#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- *ret = true; \
- } else { \
- *ret = false; \
- } \
- } while (0)
-
-// Macro to efficiently determine, through polling, if a new trace has begun.
-#define TRACE_EVENT_IS_NEW_TRACE(ret) \
- do { \
- static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
- int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
- if (num_traces_recorded != -1 && \
- num_traces_recorded != \
- INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
- INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = \
- num_traces_recorded; \
- *ret = true; \
- } else { \
- *ret = false; \
- } \
- } while (0)
-
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
@@ -965,24 +101,47 @@
// const char** arg_names,
// const unsigned char* arg_types,
// const unsigned long long* arg_values,
-// unsigned char flags)
+// const scoped_refptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT \
base::trace_event::TraceLog::GetInstance()->AddTraceEvent
// Add a trace event to the platform tracing system.
// base::trace_event::TraceEventHandle
+// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_CONTEXT_ID(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// unsigned long long id,
+// unsigned long long context_id,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// const scoped_refptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_CONTEXT_ID \
+ base::trace_event::TraceLog::GetInstance()->AddTraceEventWithContextId
+
+// Add a trace event to the platform tracing system.
+// base::trace_event::TraceEventHandle
// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
// char phase,
// const unsigned char* category_group_enabled,
// const char* name,
// unsigned long long id,
+// unsigned long long context_id,
// int thread_id,
// const TraceTicks& timestamp,
// int num_args,
// const char** arg_names,
// const unsigned char* arg_types,
// const unsigned long long* arg_values,
-// unsigned char flags)
+// const scoped_refptr<ConvertableToTraceFormat>*
+// convertable_values,
+// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
base::trace_event::TraceLog::GetInstance() \
->AddTraceEventWithThreadIdAndTimestamp
@@ -1055,7 +214,8 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
trace_event_internal::AddTraceEvent( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kNoEventId, flags, ##__VA_ARGS__); \
+ trace_event_internal::kNoId, flags, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
@@ -1070,12 +230,30 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
trace_event_internal::AddTraceEvent( \
TRACE_EVENT_PHASE_COMPLETE, \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kNoEventId, TRACE_EVENT_FLAG_NONE, \
- ##__VA_ARGS__); \
+ trace_event_internal::kNoId, TRACE_EVENT_FLAG_NONE, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
}
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW( \
+ category_group, name, bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flow_flags; \
+ trace_event_internal::TraceID trace_event_bind_id(bind_id, \
+ &trace_event_flags); \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kNoId, trace_event_flags, \
+ trace_event_bind_id.data(), ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
+
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
@@ -1083,13 +261,13 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
trace_event_internal::TraceID trace_event_trace_id( \
id, &trace_event_flags); \
trace_event_internal::AddTraceEvent( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
name, trace_event_trace_id.data(), trace_event_flags, \
- ##__VA_ARGS__); \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
@@ -1100,86 +278,25 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
trace_event_internal::TraceID trace_event_trace_id( \
id, &trace_event_flags); \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, trace_event_trace_id.data(), \
+ name, trace_event_trace_id.data(), trace_event_internal::kNoId, \
thread_id, base::TraceTicks::FromInternalValue(timestamp), \
trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
- ##__VA_ARGS__); \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
-// Notes regarding the following definitions:
-// New values can be added and propagated to third party libraries, but existing
-// definitions must never be changed, because third party libraries may use old
-// definitions.
-
-// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
-#define TRACE_EVENT_PHASE_BEGIN ('B')
-#define TRACE_EVENT_PHASE_END ('E')
-#define TRACE_EVENT_PHASE_COMPLETE ('X')
-#define TRACE_EVENT_PHASE_INSTANT ('I')
-#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
-#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
-#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
-#define TRACE_EVENT_PHASE_ASYNC_END ('F')
-#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
-#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
-#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
-#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
-#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
-#define TRACE_EVENT_PHASE_FLOW_END ('f')
-#define TRACE_EVENT_PHASE_METADATA ('M')
-#define TRACE_EVENT_PHASE_COUNTER ('C')
-#define TRACE_EVENT_PHASE_SAMPLE ('P')
-#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
-#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
-#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
-#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
-
-// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
-#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned char>(0))
-#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned char>(1 << 0))
-#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned char>(1 << 1))
-#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned char>(1 << 2))
-#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned char>(1 << 3))
-#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned char>(1 << 4))
-#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned char>(1 << 5))
-#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned char>(1 << 6))
-#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned char>(1 << 7))
-
-#define TRACE_EVENT_FLAG_SCOPE_MASK (static_cast<unsigned char>( \
- TRACE_EVENT_FLAG_SCOPE_OFFSET | TRACE_EVENT_FLAG_SCOPE_EXTRA))
-
-// Type values for identifying types in the TraceValue union.
-#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
-#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
-#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
-#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
-#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
-#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
-#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
-#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
-
-// Enum reflecting the scope of an INSTANT event. Must fit within
-// TRACE_EVENT_FLAG_SCOPE_MASK.
-#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
-#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
-#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
-
-#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
-#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
-#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
namespace trace_event_internal {
// Specify these values when the corresponding argument of AddTraceEvent is not
// used.
const int kZeroNumArgs = 0;
-const unsigned long long kNoEventId = 0;
+const unsigned long long kNoId = 0;
// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
// are by default mangled with the Process ID so that they are unlikely to
@@ -1232,35 +349,35 @@ class TraceID {
private:
unsigned long long data_;
};
- TraceID(const void* id, unsigned char* flags)
+ TraceID(const void* id, unsigned int* flags)
: data_(static_cast<unsigned long long>(
reinterpret_cast<uintptr_t>(id))) {
*flags |= TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
+ TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
*flags |= TRACE_EVENT_FLAG_MANGLE_ID;
}
- TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
+ TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {
}
- TraceID(unsigned long long id, unsigned char* flags)
+ TraceID(unsigned long long id, unsigned int* flags)
: data_(id) { (void)flags; }
- TraceID(unsigned long id, unsigned char* flags)
+ TraceID(unsigned long id, unsigned int* flags)
: data_(id) { (void)flags; }
- TraceID(unsigned int id, unsigned char* flags)
+ TraceID(unsigned int id, unsigned int* flags)
: data_(id) { (void)flags; }
- TraceID(unsigned short id, unsigned char* flags)
+ TraceID(unsigned short id, unsigned int* flags)
: data_(id) { (void)flags; }
- TraceID(unsigned char id, unsigned char* flags)
+ TraceID(unsigned char id, unsigned int* flags)
: data_(id) { (void)flags; }
- TraceID(long long id, unsigned char* flags)
+ TraceID(long long id, unsigned int* flags)
: data_(static_cast<unsigned long long>(id)) { (void)flags; }
- TraceID(long id, unsigned char* flags)
+ TraceID(long id, unsigned int* flags)
: data_(static_cast<unsigned long long>(id)) { (void)flags; }
- TraceID(int id, unsigned char* flags)
+ TraceID(int id, unsigned int* flags)
: data_(static_cast<unsigned long long>(id)) { (void)flags; }
- TraceID(short id, unsigned char* flags)
+ TraceID(short id, unsigned int* flags)
: data_(static_cast<unsigned long long>(id)) { (void)flags; }
- TraceID(signed char id, unsigned char* flags)
+ TraceID(signed char id, unsigned int* flags)
: data_(static_cast<unsigned long long>(id)) { (void)flags; }
unsigned long long data() const { return data_; }
@@ -1390,17 +507,19 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
arg1_val) {
const int num_args = 1;
unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- num_args, &arg1_name, arg_types, NULL, &arg1_val, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, num_args, &arg1_name, arg_types, NULL, &arg1_val, flags);
}
template<class ARG1_TYPE>
@@ -1410,9 +529,11 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const ARG1_TYPE& arg1_val,
const char* arg2_name,
@@ -1431,8 +552,9 @@ AddTraceEventWithThreadIdAndTimestamp(
convertable_values[1] = arg2_val;
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, num_args, arg_names, arg_types, arg_values,
+ convertable_values, flags);
}
template<class ARG2_TYPE>
@@ -1442,9 +564,11 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const scoped_refptr<base::trace_event::ConvertableToTraceFormat>& arg1_val,
const char* arg2_name,
@@ -1463,8 +587,9 @@ AddTraceEventWithThreadIdAndTimestamp(
convertable_values[0] = arg1_val;
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, num_args, arg_names, arg_types, arg_values,
+ convertable_values, flags);
}
static inline base::trace_event::TraceEventHandle
@@ -1473,9 +598,11 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const scoped_refptr<base::trace_event::ConvertableToTraceFormat>& arg1_val,
const char* arg2_name,
@@ -1489,8 +616,9 @@ AddTraceEventWithThreadIdAndTimestamp(
convertable_values[2] = {arg1_val, arg2_val};
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- num_args, arg_names, arg_types, NULL, convertable_values, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, num_args, arg_names, arg_types, NULL, convertable_values,
+ flags);
}
static inline base::trace_event::TraceEventHandle
@@ -1499,12 +627,14 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags) {
+ unsigned int flags,
+ unsigned long long bind_id) {
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
}
static inline base::trace_event::TraceEventHandle AddTraceEvent(
@@ -1512,11 +642,13 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
- unsigned char flags) {
+ unsigned int flags,
+ unsigned long long bind_id) {
const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
const base::TraceTicks now = base::TraceTicks::Now();
return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
- name, id, thread_id, now, flags);
+ name, id, kNoId, thread_id, now,
+ flags, bind_id);
}
template<class ARG1_TYPE>
@@ -1526,9 +658,11 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const ARG1_TYPE& arg1_val) {
const int num_args = 1;
@@ -1536,8 +670,8 @@ AddTraceEventWithThreadIdAndTimestamp(
unsigned long long arg_values[1];
SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- num_args, &arg1_name, arg_types, arg_values, NULL, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, num_args, &arg1_name, arg_types, arg_values, NULL, flags);
}
template<class ARG1_TYPE>
@@ -1546,13 +680,15 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const ARG1_TYPE& arg1_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
base::TraceTicks now = base::TraceTicks::Now();
return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
- name, id, thread_id, now, flags,
+ name, id, kNoId, thread_id, now,
+ flags, bind_id,
arg1_name, arg1_val);
}
@@ -1563,9 +699,11 @@ AddTraceEventWithThreadIdAndTimestamp(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
int thread_id,
const base::TraceTicks& timestamp,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const ARG1_TYPE& arg1_val,
const char* arg2_name,
@@ -1577,8 +715,8 @@ AddTraceEventWithThreadIdAndTimestamp(
SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- phase, category_group_enabled, name, id, thread_id, timestamp,
- num_args, arg_names, arg_types, arg_values, NULL, flags);
+ phase, category_group_enabled, name, id, context_id, bind_id, thread_id,
+ timestamp, num_args, arg_names, arg_types, arg_values, NULL, flags);
}
template<class ARG1_TYPE, class ARG2_TYPE>
@@ -1587,7 +725,8 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
- unsigned char flags,
+ unsigned int flags,
+ unsigned long long bind_id,
const char* arg1_name,
const ARG1_TYPE& arg1_val,
const char* arg2_name,
@@ -1595,7 +734,8 @@ static inline base::trace_event::TraceEventHandle AddTraceEvent(
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
base::TraceTicks now = base::TraceTicks::Now();
return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
- name, id, thread_id, now, flags,
+ name, id, kNoId, thread_id, now,
+ flags, bind_id,
arg1_name, arg1_val,
arg2_name, arg2_val);
}
diff --git a/chromium/base/trace_event/trace_event_android.cc b/chromium/base/trace_event/trace_event_android.cc
index 64331854f61..7815107b76e 100644
--- a/chromium/base/trace_event/trace_event_android.cc
+++ b/chromium/base/trace_event/trace_event_android.cc
@@ -29,7 +29,7 @@ void WriteEvent(
const unsigned char* arg_types,
const TraceEvent::TraceValue* arg_values,
const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags) {
+ unsigned int flags) {
std::string out = StringPrintf("%c|%d|%s", phase, getpid(), name);
if (flags & TRACE_EVENT_FLAG_HAS_ID)
StringAppendF(&out, "-%" PRIx64, static_cast<uint64>(id));
diff --git a/chromium/base/trace_event/trace_event_argument.cc b/chromium/base/trace_event/trace_event_argument.cc
index 14a4499c1f6..81b6ce039f5 100644
--- a/chromium/base/trace_event/trace_event_argument.cc
+++ b/chromium/base/trace_event/trace_event_argument.cc
@@ -4,6 +4,7 @@
#include "base/trace_event/trace_event_argument.h"
+#include "base/bits.h"
#include "base/json/json_writer.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "base/values.h"
@@ -356,7 +357,8 @@ scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
case kTypeStartDict: {
auto new_dict = new DictionaryValue();
if (cur_dict) {
- cur_dict->Set(ReadKeyName(it), make_scoped_ptr(new_dict));
+ cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
+ make_scoped_ptr(new_dict));
stack.push_back(cur_dict);
cur_dict = new_dict;
} else {
@@ -380,7 +382,8 @@ scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
case kTypeStartArray: {
auto new_list = new ListValue();
if (cur_dict) {
- cur_dict->Set(ReadKeyName(it), make_scoped_ptr(new_list));
+ cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
+ make_scoped_ptr(new_list));
stack.push_back(cur_dict);
cur_dict = nullptr;
cur_list = new_list;
@@ -395,7 +398,7 @@ scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
bool value;
CHECK(it.ReadBool(&value));
if (cur_dict) {
- cur_dict->SetBoolean(ReadKeyName(it), value);
+ cur_dict->SetBooleanWithoutPathExpansion(ReadKeyName(it), value);
} else {
cur_list->AppendBoolean(value);
}
@@ -405,7 +408,7 @@ scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
int value;
CHECK(it.ReadInt(&value));
if (cur_dict) {
- cur_dict->SetInteger(ReadKeyName(it), value);
+ cur_dict->SetIntegerWithoutPathExpansion(ReadKeyName(it), value);
} else {
cur_list->AppendInteger(value);
}
@@ -415,7 +418,7 @@ scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
double value;
CHECK(it.ReadDouble(&value));
if (cur_dict) {
- cur_dict->SetDouble(ReadKeyName(it), value);
+ cur_dict->SetDoubleWithoutPathExpansion(ReadKeyName(it), value);
} else {
cur_list->AppendDouble(value);
}
@@ -425,7 +428,7 @@ scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
std::string value;
CHECK(it.ReadString(&value));
if (cur_dict) {
- cur_dict->SetString(ReadKeyName(it), value);
+ cur_dict->SetStringWithoutPathExpansion(ReadKeyName(it), value);
} else {
cur_list->AppendString(value);
}
@@ -453,9 +456,14 @@ void TracedValue::AppendAsTraceFormat(std::string* out) const {
void TracedValue::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
+ const size_t kPickleHeapAlign = 4096; // Must be == Pickle::kPickleHeapAlign.
overhead->Add("TracedValue",
- pickle_.GetTotalAllocatedSize() /* allocated size */,
- pickle_.size() /* resident size */);
+
+ /* allocated size */
+ bits::Align(pickle_.GetTotalAllocatedSize(), kPickleHeapAlign),
+
+ /* resident size */
+ bits::Align(pickle_.size(), kPickleHeapAlign));
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/trace_event_argument_unittest.cc b/chromium/base/trace_event/trace_event_argument_unittest.cc
index cb1cf2ef1d5..c1233ac3a3c 100644
--- a/chromium/base/trace_event/trace_event_argument_unittest.cc
+++ b/chromium/base/trace_event/trace_event_argument_unittest.cc
@@ -22,6 +22,19 @@ TEST(TraceEventArgumentTest, FlatDictionary) {
json);
}
+TEST(TraceEventArgumentTest, NoDotPathExpansion) {
+ scoped_refptr<TracedValue> value = new TracedValue();
+ value->SetInteger("in.t", 2014);
+ value->SetDouble("doub.le", 0.0);
+ value->SetBoolean("bo.ol", true);
+ value->SetString("str.ing", "str.ing");
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ EXPECT_EQ(
+ "{\"bo.ol\":true,\"doub.le\":0.0,\"in.t\":2014,\"str.ing\":\"str.ing\"}",
+ json);
+}
+
TEST(TraceEventArgumentTest, Hierarchy) {
scoped_refptr<TracedValue> value = new TracedValue();
value->SetInteger("i0", 2014);
diff --git a/chromium/base/trace_event/trace_event_common.h b/chromium/base/trace_event/trace_event_common.h
new file mode 100644
index 00000000000..aa5a4935362
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_common.h
@@ -0,0 +1,1038 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// IMPORTANT: To avoid conflicts, if you need to modify this file for a library,
+// land your change in base/ first, and then copy-and-paste it.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+// TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+// void OnMyMessage(send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+// }
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+// char* str = strdup("importantName");
+// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method the trace
+// framework will call back to your object to convert a trace output time. This
+// means, if the category for the event is disabled, the conversion will not
+// happen.
+//
+// class MyData : public base::trace_event::ConvertableToTraceFormat {
+// public:
+// MyData() {}
+// void AppendAsTraceFormat(std::string* out) const override {
+// out->append("{\"foo\":1}");
+// }
+// private:
+// ~MyData() override {}
+// DISALLOW_COPY_AND_ASSIGN(MyData);
+// };
+//
+// TRACE_EVENT1("foo", "bar", "data",
+// scoped_refptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership if the passed pointer and it will
+// be free'd when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char, and
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#if defined(TRACE_EVENT0)
+#error "Another copy of this file has already been included."
+#endif
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+ flow_flags, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
+// Use this where |name| is too generic to accurately aggregate allocations.
+#define TRACE_EVENT_WITH_MEMORY_TAG2(category, name, memory_tag, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_MEMORY(category, memory_tag) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Syntactic sugars for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(categoryAndName) \
+ TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, categoryAndName)
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+ arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+#define TRACE_EVENT_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK, \
+ category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+ TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK, \
+ category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_COPY)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |at| timestamp provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, \
+ arg2_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_NONE, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ TRACE_EVENT_FLAG_COPY, value1_name, \
+ static_cast<int>(value1_val), value2_name, \
+ static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, "value", \
+ static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, "value", \
+ static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+ value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// TRACE_EVENT_SAMPLE_* events are injected by the sampling profiler.
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP0(category_group, name, \
+ thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1( \
+ category_group, name, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP2(category_group, name, \
+ thread_id, timestamp, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SAMPLE, category_group, name, 0, thread_id, timestamp, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// ASYNC_STEP_* APIs should be only used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must use
+// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
+// operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, they will only be
+// drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
+// operation must use the same |name| and |id|. Each step can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Similar to TRACE_EVENT_ASYNC_STEP_INTOx but with a custom |at| timestamp
+// provided.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+ step, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ "step", step)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - A pair of NESTABLE_ASYNC_BEGIN event and NESTABLE_ASYNC_END event is
+// considered as a match if their category_group, name and id all match.
+// - |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// - |id| is used to match a child NESTABLE_ASYNC event with its parent
+// NESTABLE_ASYNC event. Therefore, events in the same nested event tree must
+// be logged using the same id and category_group.
+//
+// Unmatched NESTABLE_ASYNC_END event will be parsed as an event that starts
+// at the first NESTABLE_ASYNC event of that id, and unmatched
+// NESTABLE_ASYNC_BEGIN event will be parsed as an event that ends at the last
+// NESTABLE_ASYNC event of that id. Corresponding warning messages for
+// unmatched events will be shown in the analysis view.
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with
+// 0, 1 or 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 0
+// or 2 associated arguments. If the category is not enabled, then this does
+// nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 1
+// associated argument. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with one associated argument. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_NESTABLE_ASYNC_{BEGIN,END}x but with a custom
+// |timestamp| provided.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+ id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0( \
+ category_group, name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2( \
+ category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. An async operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_BEGIN, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_FLOW_STEP, category_group, name, id, \
+ TRACE_EVENT_FLAG_COPY, "step", step, arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, \
+ TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, category_group, \
+ name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
+ arg1_val, arg2_name, arg2_val)
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_CREATE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+ snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP( \
+ category_group, name, id, timestamp, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+// Macro to explicitly warm up a given category group. This could be useful in
+// cases where we want to initialize a category group before any trace events
+// for that category group is reported. For example, to have a category group
+// always show up in the "record categories" list for manually selecting
+// settings in about://tracing.
+#define TRACE_EVENT_WARMUP_CATEGORY(category_group) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+ do { \
+ static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+ int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+ if (num_traces_recorded != -1 && \
+ num_traces_recorded != \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = num_traces_recorded; \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
+#define TRACE_EVENT_PHASE_MARK ('R')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned int>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned int>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned int>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned int>(1 << 3))
+#define TRACE_EVENT_FLAG_SCOPE_EXTRA (static_cast<unsigned int>(1 << 4))
+#define TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP (static_cast<unsigned int>(1 << 5))
+#define TRACE_EVENT_FLAG_ASYNC_TTS (static_cast<unsigned int>(1 << 6))
+#define TRACE_EVENT_FLAG_BIND_TO_ENCLOSING (static_cast<unsigned int>(1 << 7))
+#define TRACE_EVENT_FLAG_FLOW_IN (static_cast<unsigned int>(1 << 8))
+#define TRACE_EVENT_FLAG_FLOW_OUT (static_cast<unsigned int>(1 << 9))
+#define TRACE_EVENT_FLAG_HAS_CONTEXT_ID (static_cast<unsigned int>(1 << 10))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK \
+ (static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
+ TRACE_EVENT_FLAG_SCOPE_EXTRA))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.cc b/chromium/base/trace_event/trace_event_etw_export_win.cc
index e61a7b29645..1e05e68adfb 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.cc
+++ b/chromium/base/trace_event/trace_event_etw_export_win.cc
@@ -9,6 +9,7 @@
#include "base/memory/singleton.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/platform_thread.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
@@ -42,12 +43,12 @@ tEventRegister EventRegisterProc = nullptr;
tEventWrite EventWriteProc = nullptr;
tEventUnregister EventUnregisterProc = nullptr;
-// |filtered_event_group_names| contains the event categories that can be
+// |kFilteredEventGroupNames| contains the event categories that can be
// exported individually. These categories can be enabled by passing the correct
// keyword when starting the trace. A keyword is a 64-bit flag and we attribute
// one bit per category. We can therefore enable a particular category by
// setting its corresponding bit in the keyword. For events that are not present
-// in |filtered_event_group_names|, we have two bits that control their
+// in |kFilteredEventGroupNames|, we have two bits that control their
// behaviour. When bit 61 is enabled, any event that is not disabled by default
// (ie. doesn't start with disabled-by-default-) will be exported. Likewise,
// when bit 62 is enabled, any event that is disabled by default will be
@@ -70,7 +71,7 @@ tEventUnregister EventUnregisterProc = nullptr;
// refers to keywords as flags and there are two ways to enable them, using
// group names or the hex representation. We only support the latter. Also, we
// ignore the level.
-const char* const filtered_event_group_names[] = {
+const char* const kFilteredEventGroupNames[] = {
"benchmark", // 0x1
"blink", // 0x2
"browser", // 0x4
@@ -85,11 +86,13 @@ const char* const filtered_event_group_names[] = {
"disabled-by-default-cc.debug", // 0x800
"disabled-by-default-cc.debug.picture", // 0x1000
"disabled-by-default-toplevel.flow"}; // 0x2000
-const char* other_events_group_name = "__OTHER_EVENTS"; // 0x2000000000000000
-const char* disabled_other_events_group_name =
+const char kOtherEventsGroupName[] = "__OTHER_EVENTS"; // 0x2000000000000000
+const char kDisabledOtherEventsGroupName[] =
"__DISABLED_OTHER_EVENTS"; // 0x4000000000000000
-uint64 other_events_keyword_bit = 1ULL << 61;
-uint64 disabled_other_events_keyword_bit = 1ULL << 62;
+const uint64 kOtherEventsKeywordBit = 1ULL << 61;
+const uint64 kDisabledOtherEventsKeywordBit = 1ULL << 62;
+const size_t kNumberOfCategories = ARRAYSIZE(kFilteredEventGroupNames) + 2U;
+
} // namespace
// Redirector function for EventRegister. Called by macros in
@@ -127,6 +130,30 @@ ULONG EVNTAPI EventUnregister(REGHANDLE RegHandle) {
namespace base {
namespace trace_event {
+// This object will be created by each process. It's a background (low-priority)
+// thread that will monitor the ETW keyword for any changes.
+class TraceEventETWExport::ETWKeywordUpdateThread
+ : public PlatformThread::Delegate {
+ public:
+ ETWKeywordUpdateThread() {}
+ ~ETWKeywordUpdateThread() override {}
+
+ // Implementation of PlatformThread::Delegate:
+ void ThreadMain() override {
+ PlatformThread::SetName("ETW Keyword Update Thread");
+ TimeDelta sleep_time = TimeDelta::FromMilliseconds(kUpdateTimerDelayMs);
+ while (1) {
+ PlatformThread::Sleep(sleep_time);
+ trace_event::TraceEventETWExport::UpdateETWKeyword();
+ }
+ }
+
+ private:
+ // Time between checks for ETW keyword changes (in milliseconds).
+ unsigned int kUpdateTimerDelayMs = 1000;
+};
+
+
TraceEventETWExport::TraceEventETWExport()
: etw_export_enabled_(false), etw_match_any_keyword_(0ULL) {
// Find Advapi32.dll. This should always succeed.
@@ -143,9 +170,17 @@ TraceEventETWExport::TraceEventETWExport()
// Register the ETW provider. If registration fails then the event logging
// calls will fail (on XP this call will do nothing).
EventRegisterChrome();
-
- UpdateEnabledCategories();
}
+
+ // Make sure to initialize the map with all the group names. Subsequent
+ // modifications will be made by the background thread and only affect the
+ // values of the keys (no key addition/deletion). Therefore, the map does not
+ // require a lock for access.
+ for (int i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++)
+ categories_status_[kFilteredEventGroupNames[i]] = false;
+ categories_status_[kOtherEventsGroupName] = false;
+ categories_status_[kDisabledOtherEventsGroupName] = false;
+ DCHECK_EQ(kNumberOfCategories, categories_status_.size());
}
TraceEventETWExport::~TraceEventETWExport() {
@@ -160,19 +195,33 @@ TraceEventETWExport* TraceEventETWExport::GetInstance() {
// static
void TraceEventETWExport::EnableETWExport() {
- if (GetInstance())
- GetInstance()->etw_export_enabled_ = true;
+ auto* instance = GetInstance();
+ if (instance && !instance->etw_export_enabled_) {
+ instance->etw_export_enabled_ = true;
+ // Sync the enabled categories with ETW by calling UpdateEnabledCategories()
+ // that checks the keyword. Then create a thread that will call that same
+ // function periodically, to make sure we stay in sync.
+ instance->UpdateEnabledCategories();
+ if (instance->keyword_update_thread_handle_.is_null()) {
+ instance->keyword_update_thread_.reset(new ETWKeywordUpdateThread);
+ PlatformThread::CreateWithPriority(
+ 0, instance->keyword_update_thread_.get(),
+ &instance->keyword_update_thread_handle_, ThreadPriority::BACKGROUND);
+ }
+ }
}
// static
void TraceEventETWExport::DisableETWExport() {
- if (GetInstance())
- GetInstance()->etw_export_enabled_ = false;
+ auto* instance = GetInstance();
+ if (instance && instance->etw_export_enabled_)
+ instance->etw_export_enabled_ = false;
}
// static
bool TraceEventETWExport::IsETWExportEnabled() {
- return (GetInstance() && GetInstance()->etw_export_enabled_);
+ auto* instance = GetInstance();
+ return (instance && instance->etw_export_enabled_);
}
// static
@@ -187,8 +236,8 @@ void TraceEventETWExport::AddEvent(
const unsigned long long* arg_values,
const scoped_refptr<ConvertableToTraceFormat>* convertable_values) {
// We bail early in case exporting is disabled or no consumer is listening.
- if (!GetInstance() || !GetInstance()->etw_export_enabled_ ||
- !EventEnabledChromeEvent())
+ auto* instance = GetInstance();
+ if (!instance || !instance->etw_export_enabled_ || !EventEnabledChromeEvent())
return;
const char* phase_string = nullptr;
@@ -294,8 +343,8 @@ void TraceEventETWExport::AddCustomEvent(const char* name,
const char* arg_value_2,
const char* arg_name_3,
const char* arg_value_3) {
- if (!GetInstance() || !GetInstance()->etw_export_enabled_ ||
- !EventEnabledChromeEvent())
+ auto* instance = GetInstance();
+ if (!instance || !instance->etw_export_enabled_ || !EventEnabledChromeEvent())
return;
EventWriteChromeEvent(name, phase, arg_name_1, arg_value_1, arg_name_2,
@@ -306,7 +355,7 @@ void TraceEventETWExport::AddCustomEvent(const char* name,
bool TraceEventETWExport::IsCategoryGroupEnabled(
const char* category_group_name) {
DCHECK(category_group_name);
- auto instance = GetInstance();
+ auto* instance = GetInstance();
if (instance == nullptr)
return false;
@@ -334,30 +383,36 @@ bool TraceEventETWExport::UpdateEnabledCategories() {
// recording tools) using the ETW infrastructure. This value will be set in
// all Chrome processes that have registered their ETW provider.
etw_match_any_keyword_ = CHROME_Context.MatchAnyKeyword;
- for (int i = 0; i < ARRAYSIZE(filtered_event_group_names); i++) {
+ for (int i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++) {
if (etw_match_any_keyword_ & (1ULL << i)) {
- categories_status_[filtered_event_group_names[i]] = true;
+ categories_status_[kFilteredEventGroupNames[i]] = true;
} else {
- categories_status_[filtered_event_group_names[i]] = false;
+ categories_status_[kFilteredEventGroupNames[i]] = false;
}
}
// Also update the two default categories.
- if (etw_match_any_keyword_ & other_events_keyword_bit) {
- categories_status_[other_events_group_name] = true;
+ if (etw_match_any_keyword_ & kOtherEventsKeywordBit) {
+ categories_status_[kOtherEventsGroupName] = true;
} else {
- categories_status_[other_events_group_name] = false;
+ categories_status_[kOtherEventsGroupName] = false;
}
- if (etw_match_any_keyword_ & disabled_other_events_keyword_bit) {
- categories_status_[disabled_other_events_group_name] = true;
+ if (etw_match_any_keyword_ & kDisabledOtherEventsKeywordBit) {
+ categories_status_[kDisabledOtherEventsGroupName] = true;
} else {
- categories_status_[disabled_other_events_group_name] = false;
+ categories_status_[kDisabledOtherEventsGroupName] = false;
}
+ DCHECK_EQ(kNumberOfCategories, categories_status_.size());
+
+ // Update the categories in TraceLog.
+ TraceLog::GetInstance()->UpdateETWCategoryGroupEnabledFlags();
+
return true;
}
bool TraceEventETWExport::IsCategoryEnabled(const char* category_name) const {
+ DCHECK_EQ(kNumberOfCategories, categories_status_.size());
// Try to find the category and return its status if found
auto it = categories_status_.find(category_name);
if (it != categories_status_.end())
@@ -366,15 +421,23 @@ bool TraceEventETWExport::IsCategoryEnabled(const char* category_name) const {
// Otherwise return the corresponding default status by first checking if the
// category is disabled by default.
if (StringPiece(category_name).starts_with("disabled-by-default")) {
- DCHECK(categories_status_.find(disabled_other_events_group_name) !=
+ DCHECK(categories_status_.find(kDisabledOtherEventsGroupName) !=
categories_status_.end());
- return categories_status_.find(disabled_other_events_group_name)->second;
+ return categories_status_.find(kDisabledOtherEventsGroupName)->second;
} else {
- DCHECK(categories_status_.find(other_events_group_name) !=
+ DCHECK(categories_status_.find(kOtherEventsGroupName) !=
categories_status_.end());
- return categories_status_.find(other_events_group_name)->second;
+ return categories_status_.find(kOtherEventsGroupName)->second;
}
}
+// static
+void TraceEventETWExport::UpdateETWKeyword() {
+ if (!IsETWExportEnabled())
+ return;
+ auto* instance = GetInstance();
+ DCHECK(instance);
+ instance->UpdateEnabledCategories();
+}
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.h b/chromium/base/trace_event/trace_event_etw_export_win.h
index 9f73d781f73..7a1c02965d5 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.h
+++ b/chromium/base/trace_event/trace_event_etw_export_win.h
@@ -12,11 +12,11 @@
#include "base/strings/string_piece.h"
#include "base/trace_event/trace_event_impl.h"
-// Fwd.
+namespace base {
+
template <typename Type>
struct StaticMemorySingletonTraits;
-namespace base {
namespace trace_event {
class BASE_EXPORT TraceEventETWExport {
@@ -67,6 +67,8 @@ class BASE_EXPORT TraceEventETWExport {
private:
// Ensure only the provider can construct us.
friend struct StaticMemorySingletonTraits<TraceEventETWExport>;
+ // To have access to UpdateKeyword().
+ class ETWKeywordUpdateThread;
TraceEventETWExport();
// Updates the list of enabled categories by consulting the ETW keyword.
@@ -76,15 +78,24 @@ class BASE_EXPORT TraceEventETWExport {
// Returns true if the category is enabled.
bool IsCategoryEnabled(const char* category_name) const;
+ // Called back by the update thread to check for potential changes to the
+ // keyword.
+ static void UpdateETWKeyword();
+
// True if ETW is enabled. Allows hiding the exporting behind a flag.
bool etw_export_enabled_;
// Maps category names to their status (enabled/disabled).
- std::map<base::StringPiece, bool> categories_status_;
+ std::map<StringPiece, bool> categories_status_;
// Local copy of the ETW keyword.
uint64 etw_match_any_keyword_;
+ // Background thread that monitors changes to the ETW keyword and updates
+ // the enabled categories when a change occurs.
+ scoped_ptr<ETWKeywordUpdateThread> keyword_update_thread_;
+ PlatformThreadHandle keyword_update_thread_handle_;
+
DISALLOW_COPY_AND_ASSIGN(TraceEventETWExport);
};
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index 9d17f4f6f17..e78ee61b121 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -4,533 +4,21 @@
#include "base/trace_event/trace_event_impl.h"
-#include <algorithm>
-#include <cmath>
-
-#include "base/base_switches.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
-#include "base/lazy_instance.h"
-#include "base/location.h"
-#include "base/memory/singleton.h"
-#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/synchronization/cancellation_flag.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/sys_info.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_id_name_manager.h"
-#include "base/threading/worker_pool.h"
-#include "base/time/time.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_synthetic_delay.h"
-
-#if defined(OS_WIN)
-#include "base/trace_event/trace_event_etw_export_win.h"
-#include "base/trace_event/trace_event_win.h"
-#endif
-
-class DeleteTraceLogForTesting {
- public:
- static void Delete() {
- Singleton<base::trace_event::TraceLog,
- LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
- }
-};
-
-// The thread buckets for the sampling profiler.
-BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+#include "base/trace_event/trace_log.h"
namespace base {
namespace trace_event {
namespace {
-// The overhead of TraceEvent above this threshold will be reported in the
-// trace.
-const int kOverheadReportThresholdInMicroseconds = 50;
-
-// Controls the number of trace events we will buffer in-memory
-// before throwing them away.
-const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
-const size_t kTraceEventVectorBigBufferChunks =
- 512000000 / kTraceBufferChunkSize;
-const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
-const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
-const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
-// Can store results for 30 seconds with 1 ms sampling interval.
-const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
-// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
-const size_t kEchoToConsoleTraceEventBufferChunks = 256;
-
-const int kThreadFlushTimeoutMs = 3000;
-
-#if !defined(OS_NACL)
-// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
-const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
-#endif
-
-#define MAX_CATEGORY_GROUPS 100
-
-// Parallel arrays g_category_groups and g_category_group_enabled are separate
-// so that a pointer to a member of g_category_group_enabled can be easily
-// converted to an index into g_category_groups. This allows macros to deal
-// only with char enabled pointers from g_category_group_enabled, and we can
-// convert internally to determine the category name from the char enabled
-// pointer.
-const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
- "toplevel",
- "tracing already shutdown",
- "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
- "__metadata",
- // For reporting trace_event overhead. For thread local event buffers only.
- "trace_event_overhead"};
-
-// The enabled flag is char instead of bool so that the API can be used from C.
-unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
-// Indexes here have to match the g_category_groups array indexes above.
-const int g_category_already_shutdown = 1;
-const int g_category_categories_exhausted = 2;
-const int g_category_metadata = 3;
-const int g_category_trace_event_overhead = 4;
-const int g_num_builtin_categories = 5;
-// Skip default categories.
-base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
-
-// The name of the current thread. This is used to decide if the current
-// thread name has changed. We combine all the seen thread names into the
-// output name for the thread.
-LazyInstance<ThreadLocalPointer<const char> >::Leaky
- g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
-
-ThreadTicks ThreadNow() {
- return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
-}
-
-class TraceBufferRingBuffer : public TraceBuffer {
- public:
- TraceBufferRingBuffer(size_t max_chunks)
- : max_chunks_(max_chunks),
- recyclable_chunks_queue_(new size_t[queue_capacity()]),
- queue_head_(0),
- queue_tail_(max_chunks),
- current_iteration_index_(0),
- current_chunk_seq_(1) {
- chunks_.reserve(max_chunks);
- for (size_t i = 0; i < max_chunks; ++i)
- recyclable_chunks_queue_[i] = i;
- }
-
- scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
- // Because the number of threads is much less than the number of chunks,
- // the queue should never be empty.
- DCHECK(!QueueIsEmpty());
-
- *index = recyclable_chunks_queue_[queue_head_];
- queue_head_ = NextQueueIndex(queue_head_);
- current_iteration_index_ = queue_head_;
-
- if (*index >= chunks_.size())
- chunks_.resize(*index + 1);
-
- TraceBufferChunk* chunk = chunks_[*index];
- chunks_[*index] = NULL; // Put NULL in the slot of a in-flight chunk.
- if (chunk)
- chunk->Reset(current_chunk_seq_++);
- else
- chunk = new TraceBufferChunk(current_chunk_seq_++);
-
- return scoped_ptr<TraceBufferChunk>(chunk);
- }
-
- void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
- // When this method is called, the queue should not be full because it
- // can contain all chunks including the one to be returned.
- DCHECK(!QueueIsFull());
- DCHECK(chunk);
- DCHECK_LT(index, chunks_.size());
- DCHECK(!chunks_[index]);
- chunks_[index] = chunk.release();
- recyclable_chunks_queue_[queue_tail_] = index;
- queue_tail_ = NextQueueIndex(queue_tail_);
- }
-
- bool IsFull() const override { return false; }
-
- size_t Size() const override {
- // This is approximate because not all of the chunks are full.
- return chunks_.size() * kTraceBufferChunkSize;
- }
-
- size_t Capacity() const override {
- return max_chunks_ * kTraceBufferChunkSize;
- }
-
- TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
- if (handle.chunk_index >= chunks_.size())
- return NULL;
- TraceBufferChunk* chunk = chunks_[handle.chunk_index];
- if (!chunk || chunk->seq() != handle.chunk_seq)
- return NULL;
- return chunk->GetEventAt(handle.event_index);
- }
-
- const TraceBufferChunk* NextChunk() override {
- if (chunks_.empty())
- return NULL;
-
- while (current_iteration_index_ != queue_tail_) {
- size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
- current_iteration_index_ = NextQueueIndex(current_iteration_index_);
- if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
- continue;
- DCHECK(chunks_[chunk_index]);
- return chunks_[chunk_index];
- }
- return NULL;
- }
-
- scoped_ptr<TraceBuffer> CloneForIteration() const override {
- scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
- for (size_t queue_index = queue_head_; queue_index != queue_tail_;
- queue_index = NextQueueIndex(queue_index)) {
- size_t chunk_index = recyclable_chunks_queue_[queue_index];
- if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
- continue;
- TraceBufferChunk* chunk = chunks_[chunk_index];
- cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
- }
- return cloned_buffer.Pass();
- }
-
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- overhead->Add("TraceBufferRingBuffer", sizeof(*this));
- for (size_t queue_index = queue_head_; queue_index != queue_tail_;
- queue_index = NextQueueIndex(queue_index)) {
- size_t chunk_index = recyclable_chunks_queue_[queue_index];
- if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
- continue;
- chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
- }
- }
-
- private:
- class ClonedTraceBuffer : public TraceBuffer {
- public:
- ClonedTraceBuffer() : current_iteration_index_(0) {}
-
- // The only implemented method.
- const TraceBufferChunk* NextChunk() override {
- return current_iteration_index_ < chunks_.size() ?
- chunks_[current_iteration_index_++] : NULL;
- }
-
- scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
- NOTIMPLEMENTED();
- return scoped_ptr<TraceBufferChunk>();
- }
- void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
- NOTIMPLEMENTED();
- }
- bool IsFull() const override { return false; }
- size_t Size() const override { return 0; }
- size_t Capacity() const override { return 0; }
- TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
- return NULL;
- }
- scoped_ptr<TraceBuffer> CloneForIteration() const override {
- NOTIMPLEMENTED();
- return scoped_ptr<TraceBuffer>();
- }
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- NOTIMPLEMENTED();
- }
-
- size_t current_iteration_index_;
- ScopedVector<TraceBufferChunk> chunks_;
- };
-
- bool QueueIsEmpty() const {
- return queue_head_ == queue_tail_;
- }
-
- size_t QueueSize() const {
- return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
- queue_tail_ + queue_capacity() - queue_head_;
- }
-
- bool QueueIsFull() const {
- return QueueSize() == queue_capacity() - 1;
- }
-
- size_t queue_capacity() const {
- // One extra space to help distinguish full state and empty state.
- return max_chunks_ + 1;
- }
-
- size_t NextQueueIndex(size_t index) const {
- index++;
- if (index >= queue_capacity())
- index = 0;
- return index;
- }
-
- size_t max_chunks_;
- ScopedVector<TraceBufferChunk> chunks_;
-
- scoped_ptr<size_t[]> recyclable_chunks_queue_;
- size_t queue_head_;
- size_t queue_tail_;
-
- size_t current_iteration_index_;
- uint32 current_chunk_seq_;
-
- DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
-};
-
-class TraceBufferVector : public TraceBuffer {
- public:
- TraceBufferVector(size_t max_chunks)
- : in_flight_chunk_count_(0),
- current_iteration_index_(0),
- max_chunks_(max_chunks) {
- chunks_.reserve(max_chunks_);
- }
-
- scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
- // This function may be called when adding normal events or indirectly from
- // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
- // have to add the metadata events and flush thread-local buffers even if
- // the buffer is full.
- *index = chunks_.size();
- chunks_.push_back(NULL); // Put NULL in the slot of a in-flight chunk.
- ++in_flight_chunk_count_;
- // + 1 because zero chunk_seq is not allowed.
- return scoped_ptr<TraceBufferChunk>(
- new TraceBufferChunk(static_cast<uint32>(*index) + 1));
- }
-
- void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
- DCHECK_GT(in_flight_chunk_count_, 0u);
- DCHECK_LT(index, chunks_.size());
- DCHECK(!chunks_[index]);
- --in_flight_chunk_count_;
- chunks_[index] = chunk.release();
- }
-
- bool IsFull() const override { return chunks_.size() >= max_chunks_; }
-
- size_t Size() const override {
- // This is approximate because not all of the chunks are full.
- return chunks_.size() * kTraceBufferChunkSize;
- }
-
- size_t Capacity() const override {
- return max_chunks_ * kTraceBufferChunkSize;
- }
-
- TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
- if (handle.chunk_index >= chunks_.size())
- return NULL;
- TraceBufferChunk* chunk = chunks_[handle.chunk_index];
- if (!chunk || chunk->seq() != handle.chunk_seq)
- return NULL;
- return chunk->GetEventAt(handle.event_index);
- }
-
- const TraceBufferChunk* NextChunk() override {
- while (current_iteration_index_ < chunks_.size()) {
- // Skip in-flight chunks.
- const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
- if (chunk)
- return chunk;
- }
- return NULL;
- }
-
- scoped_ptr<TraceBuffer> CloneForIteration() const override {
- NOTIMPLEMENTED();
- return scoped_ptr<TraceBuffer>();
- }
-
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- const size_t chunks_ptr_vector_allocated_size =
- sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
- const size_t chunks_ptr_vector_resident_size =
- sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
- overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
- chunks_ptr_vector_resident_size);
- for (size_t i = 0; i < chunks_.size(); ++i) {
- TraceBufferChunk* chunk = chunks_[i];
- // Skip the in-flight (nullptr) chunks. They will be accounted by the
- // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
- if (chunk)
- chunk->EstimateTraceMemoryOverhead(overhead);
- }
- }
-
- private:
- size_t in_flight_chunk_count_;
- size_t current_iteration_index_;
- size_t max_chunks_;
- ScopedVector<TraceBufferChunk> chunks_;
-
- DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
-};
-
-template <typename T>
-void InitializeMetadataEvent(TraceEvent* trace_event,
- int thread_id,
- const char* metadata_name, const char* arg_name,
- const T& value) {
- if (!trace_event)
- return;
-
- int num_args = 1;
- unsigned char arg_type;
- unsigned long long arg_value;
- ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
- trace_event->Initialize(thread_id,
- TraceTicks(), ThreadTicks(),
- TRACE_EVENT_PHASE_METADATA,
- &g_category_group_enabled[g_category_metadata],
- metadata_name, ::trace_event_internal::kNoEventId,
- num_args, &arg_name, &arg_type, &arg_value, NULL,
- TRACE_EVENT_FLAG_NONE);
-}
-
-class AutoThreadLocalBoolean {
- public:
- explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
- : thread_local_boolean_(thread_local_boolean) {
- DCHECK(!thread_local_boolean_->Get());
- thread_local_boolean_->Set(true);
- }
- ~AutoThreadLocalBoolean() {
- thread_local_boolean_->Set(false);
- }
-
- private:
- ThreadLocalBoolean* thread_local_boolean_;
- DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
-};
-
-} // namespace
-
-TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {
-}
-
-TraceBufferChunk::~TraceBufferChunk() {
-}
-
-void TraceBufferChunk::Reset(uint32 new_seq) {
- for (size_t i = 0; i < next_free_; ++i)
- chunk_[i].Reset();
- next_free_ = 0;
- seq_ = new_seq;
- cached_overhead_estimate_when_full_.reset();
-}
-
-TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
- DCHECK(!IsFull());
- *event_index = next_free_++;
- return &chunk_[*event_index];
-}
-
-scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
- scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
- cloned_chunk->next_free_ = next_free_;
- for (size_t i = 0; i < next_free_; ++i)
- cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
- return cloned_chunk.Pass();
-}
-
-void TraceBufferChunk::EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) {
- if (cached_overhead_estimate_when_full_) {
- DCHECK(IsFull());
- overhead->Update(*cached_overhead_estimate_when_full_);
- return;
- }
-
- // Cache the memory overhead estimate only if the chunk is full.
- TraceEventMemoryOverhead* estimate = overhead;
- if (IsFull()) {
- cached_overhead_estimate_when_full_.reset(new TraceEventMemoryOverhead);
- estimate = cached_overhead_estimate_when_full_.get();
- }
-
- estimate->Add("TraceBufferChunk", sizeof(*this));
- for (size_t i = 0; i < next_free_; ++i)
- chunk_[i].EstimateTraceMemoryOverhead(estimate);
-
- if (IsFull()) {
- estimate->AddSelf();
- overhead->Update(*estimate);
- }
-}
-
-// A helper class that allows the lock to be acquired in the middle of the scope
-// and unlocks at the end of scope if locked.
-class TraceLog::OptionalAutoLock {
- public:
- explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
-
- ~OptionalAutoLock() {
- if (locked_)
- lock_->Release();
- }
-
- void EnsureAcquired() {
- if (!locked_) {
- lock_->Acquire();
- locked_ = true;
- }
- }
-
- private:
- Lock* lock_;
- bool locked_;
- DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
-};
-
-// Use this function instead of TraceEventHandle constructor to keep the
-// overhead of ScopedTracer (trace_event.h) constructor minimum.
-void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
- TraceEventHandle* handle) {
- DCHECK(chunk_seq);
- DCHECK(chunk_index < (1u << 16));
- DCHECK(event_index < (1u << 16));
- handle->chunk_seq = chunk_seq;
- handle->chunk_index = static_cast<uint16>(chunk_index);
- handle->event_index = static_cast<uint16>(event_index);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceEvent
-//
-////////////////////////////////////////////////////////////////////////////////
-
-namespace {
-
size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
// Copies |*member| into |*buffer|, sets |*member| to point to this new
@@ -569,6 +57,7 @@ void TraceEvent::CopyFrom(const TraceEvent& other) {
thread_timestamp_ = other.thread_timestamp_;
duration_ = other.duration_;
id_ = other.id_;
+ context_id_ = other.context_id_;
category_group_enabled_ = other.category_group_enabled_;
name_ = other.name_;
thread_id_ = other.thread_id_;
@@ -592,21 +81,25 @@ void TraceEvent::Initialize(
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
+ unsigned long long bind_id,
int num_args,
const char** arg_names,
const unsigned char* arg_types,
const unsigned long long* arg_values,
const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags) {
+ unsigned int flags) {
timestamp_ = timestamp;
thread_timestamp_ = thread_timestamp;
duration_ = TimeDelta::FromInternalValue(-1);
id_ = id;
+ context_id_ = context_id;
category_group_enabled_ = category_group_enabled;
name_ = name;
thread_id_ = thread_id;
phase_ = phase;
flags_ = flags;
+ bind_id_ = bind_id;
// Clamp num_args since it may have been set by a third_party library.
num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
@@ -678,7 +171,6 @@ void TraceEvent::Reset() {
parameter_copy_storage_ = NULL;
for (int i = 0; i < kTraceMaxNumArgs; ++i)
convertable_values_[i] = NULL;
- cached_memory_overhead_estimate_.reset();
}
void TraceEvent::UpdateDuration(const TraceTicks& now,
@@ -690,25 +182,18 @@ void TraceEvent::UpdateDuration(const TraceTicks& now,
void TraceEvent::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- if (!cached_memory_overhead_estimate_) {
- cached_memory_overhead_estimate_.reset(new TraceEventMemoryOverhead);
- cached_memory_overhead_estimate_->Add("TraceEvent", sizeof(*this));
- // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
- // could be shared by several events and we might overcount. In practice
- // this is unlikely but it's worth checking.
- if (parameter_copy_storage_) {
- cached_memory_overhead_estimate_->AddRefCountedString(
- *parameter_copy_storage_.get());
- }
- for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
- if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
- convertable_values_[i]->EstimateTraceMemoryOverhead(
- cached_memory_overhead_estimate_.get());
- }
- }
- cached_memory_overhead_estimate_->AddSelf();
+ overhead->Add("TraceEvent", sizeof(*this));
+
+ // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
+ // could be shared by several events and we might overcount. In practice
+ // this is unlikely but it's worth checking.
+ if (parameter_copy_storage_)
+ overhead->AddRefCountedString(*parameter_copy_storage_.get());
+
+ for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i]->EstimateTraceMemoryOverhead(overhead);
}
- overhead->Update(*cached_memory_overhead_estimate_);
}
// static
@@ -848,6 +333,21 @@ void TraceEvent::AppendAsJSON(
if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
StringAppendF(out, ",\"bp\":\"e\"");
+ if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
+ (flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
+ StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
+ static_cast<uint64>(bind_id_));
+ }
+ if (flags_ & TRACE_EVENT_FLAG_FLOW_IN)
+ StringAppendF(out, ",\"flow_in\":true");
+ if (flags_ & TRACE_EVENT_FLAG_FLOW_OUT)
+ StringAppendF(out, ",\"flow_out\":true");
+
+ // Similar to id_, print the context_id as hex if present.
+ if (flags_ & TRACE_EVENT_FLAG_HAS_CONTEXT_ID)
+ StringAppendF(out, ",\"cid\":\"0x%" PRIx64 "\"",
+ static_cast<uint64>(context_id_));
+
// Instant events also output their scope.
if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
char scope = '?';
@@ -893,1637 +393,5 @@ void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
}
}
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceResultBuffer
-//
-////////////////////////////////////////////////////////////////////////////////
-
-TraceResultBuffer::OutputCallback
- TraceResultBuffer::SimpleOutput::GetCallback() {
- return Bind(&SimpleOutput::Append, Unretained(this));
-}
-
-void TraceResultBuffer::SimpleOutput::Append(
- const std::string& json_trace_output) {
- json_output += json_trace_output;
-}
-
-TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
-}
-
-TraceResultBuffer::~TraceResultBuffer() {
-}
-
-void TraceResultBuffer::SetOutputCallback(
- const OutputCallback& json_chunk_callback) {
- output_callback_ = json_chunk_callback;
-}
-
-void TraceResultBuffer::Start() {
- append_comma_ = false;
- output_callback_.Run("[");
-}
-
-void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
- if (append_comma_)
- output_callback_.Run(",");
- append_comma_ = true;
- output_callback_.Run(trace_fragment);
-}
-
-void TraceResultBuffer::Finish() {
- output_callback_.Run("]");
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceSamplingThread
-//
-////////////////////////////////////////////////////////////////////////////////
-class TraceBucketData;
-typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
-
-class TraceBucketData {
- public:
- TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback);
- ~TraceBucketData();
-
- TRACE_EVENT_API_ATOMIC_WORD* bucket;
- const char* bucket_name;
- TraceSampleCallback callback;
-};
-
-// This object must be created on the IO thread.
-class TraceSamplingThread : public PlatformThread::Delegate {
- public:
- TraceSamplingThread();
- ~TraceSamplingThread() override;
-
- // Implementation of PlatformThread::Delegate:
- void ThreadMain() override;
-
- static void DefaultSamplingCallback(TraceBucketData* bucekt_data);
-
- void Stop();
- void WaitSamplingEventForTesting();
-
- private:
- friend class TraceLog;
-
- void GetSamples();
- // Not thread-safe. Once the ThreadMain has been called, this can no longer
- // be called.
- void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback);
- // Splits a combined "category\0name" into the two component parts.
- static void ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name);
- std::vector<TraceBucketData> sample_buckets_;
- bool thread_running_;
- CancellationFlag cancellation_flag_;
- WaitableEvent waitable_event_for_testing_;
-};
-
-
-TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false),
- waitable_event_for_testing_(false, false) {
-}
-
-TraceSamplingThread::~TraceSamplingThread() {
-}
-
-void TraceSamplingThread::ThreadMain() {
- PlatformThread::SetName("Sampling Thread");
- thread_running_ = true;
- const int kSamplingFrequencyMicroseconds = 1000;
- while (!cancellation_flag_.IsSet()) {
- PlatformThread::Sleep(
- TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
- GetSamples();
- waitable_event_for_testing_.Signal();
- }
-}
-
-// static
-void TraceSamplingThread::DefaultSamplingCallback(
- TraceBucketData* bucket_data) {
- TRACE_EVENT_API_ATOMIC_WORD category_and_name =
- TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
- if (!category_and_name)
- return;
- const char* const combined =
- reinterpret_cast<const char* const>(category_and_name);
- const char* category_group;
- const char* name;
- ExtractCategoryAndName(combined, &category_group, &name);
- TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
- TraceLog::GetCategoryGroupEnabled(category_group),
- name, 0, 0, NULL, NULL, NULL, NULL, 0);
-}
-
-void TraceSamplingThread::GetSamples() {
- for (size_t i = 0; i < sample_buckets_.size(); ++i) {
- TraceBucketData* bucket_data = &sample_buckets_[i];
- bucket_data->callback.Run(bucket_data);
- }
-}
-
-void TraceSamplingThread::RegisterSampleBucket(
- TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback) {
- // Access to sample_buckets_ doesn't cause races with the sampling thread
- // that uses the sample_buckets_, because it is guaranteed that
- // RegisterSampleBucket is called before the sampling thread is created.
- DCHECK(!thread_running_);
- sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
-}
-
-// static
-void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name) {
- *category = combined;
- *name = &combined[strlen(combined) + 1];
-}
-
-void TraceSamplingThread::Stop() {
- cancellation_flag_.Set();
-}
-
-void TraceSamplingThread::WaitSamplingEventForTesting() {
- waitable_event_for_testing_.Wait();
-}
-
-TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback)
- : bucket(bucket),
- bucket_name(name),
- callback(callback) {
-}
-
-TraceBucketData::~TraceBucketData() {
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceLog
-//
-////////////////////////////////////////////////////////////////////////////////
-
-class TraceLog::ThreadLocalEventBuffer
- : public MessageLoop::DestructionObserver,
- public MemoryDumpProvider {
- public:
- ThreadLocalEventBuffer(TraceLog* trace_log);
- ~ThreadLocalEventBuffer() override;
-
- TraceEvent* AddTraceEvent(TraceEventHandle* handle);
-
- void ReportOverhead(const TraceTicks& event_timestamp,
- const ThreadTicks& event_thread_timestamp);
-
- TraceEvent* GetEventByHandle(TraceEventHandle handle) {
- if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
- handle.chunk_index != chunk_index_)
- return NULL;
-
- return chunk_->GetEventAt(handle.event_index);
- }
-
- int generation() const { return generation_; }
-
- private:
- // MessageLoop::DestructionObserver
- void WillDestroyCurrentMessageLoop() override;
-
- // MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
-
- void FlushWhileLocked();
-
- void CheckThisIsCurrentBuffer() const {
- DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
- }
-
- // Since TraceLog is a leaky singleton, trace_log_ will always be valid
- // as long as the thread exists.
- TraceLog* trace_log_;
- scoped_ptr<TraceBufferChunk> chunk_;
- size_t chunk_index_;
- int event_count_;
- TimeDelta overhead_;
- int generation_;
-
- DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
-};
-
-TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
- : trace_log_(trace_log),
- chunk_index_(0),
- event_count_(0),
- generation_(trace_log->generation()) {
- // ThreadLocalEventBuffer is created only if the thread has a message loop, so
- // the following message_loop won't be NULL.
- MessageLoop* message_loop = MessageLoop::current();
- message_loop->AddDestructionObserver(this);
-
- // This is to report the local memory usage when memory-infra is enabled.
- MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, ThreadTaskRunnerHandle::Get());
-
- AutoLock lock(trace_log->lock_);
- trace_log->thread_message_loops_.insert(message_loop);
-}
-
-TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
- CheckThisIsCurrentBuffer();
- MessageLoop::current()->RemoveDestructionObserver(this);
- MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
-
- // Zero event_count_ happens in either of the following cases:
- // - no event generated for the thread;
- // - the thread has no message loop;
- // - trace_event_overhead is disabled.
- if (event_count_) {
- InitializeMetadataEvent(AddTraceEvent(NULL),
- static_cast<int>(base::PlatformThread::CurrentId()),
- "overhead", "average_overhead",
- overhead_.InMillisecondsF() / event_count_);
- }
-
- {
- AutoLock lock(trace_log_->lock_);
- FlushWhileLocked();
- trace_log_->thread_message_loops_.erase(MessageLoop::current());
- }
- trace_log_->thread_local_event_buffer_.Set(NULL);
-}
-
-TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
- TraceEventHandle* handle) {
- CheckThisIsCurrentBuffer();
-
- if (chunk_ && chunk_->IsFull()) {
- AutoLock lock(trace_log_->lock_);
- FlushWhileLocked();
- chunk_.reset();
- }
- if (!chunk_) {
- AutoLock lock(trace_log_->lock_);
- chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
- trace_log_->CheckIfBufferIsFullWhileLocked();
- }
- if (!chunk_)
- return NULL;
-
- size_t event_index;
- TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
- if (trace_event && handle)
- MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
-
- return trace_event;
-}
-
-void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
- const TraceTicks& event_timestamp,
- const ThreadTicks& event_thread_timestamp) {
- if (!g_category_group_enabled[g_category_trace_event_overhead])
- return;
-
- CheckThisIsCurrentBuffer();
-
- event_count_++;
- ThreadTicks thread_now = ThreadNow();
- TraceTicks now = trace_log_->OffsetNow();
- TimeDelta overhead = now - event_timestamp;
- if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
- TraceEvent* trace_event = AddTraceEvent(NULL);
- if (trace_event) {
- trace_event->Initialize(
- static_cast<int>(PlatformThread::CurrentId()),
- event_timestamp, event_thread_timestamp,
- TRACE_EVENT_PHASE_COMPLETE,
- &g_category_group_enabled[g_category_trace_event_overhead],
- "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
- trace_event->UpdateDuration(now, thread_now);
- }
- }
- overhead_ += overhead;
-}
-
-void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
- delete this;
-}
-
-bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(ProcessMemoryDump* pmd) {
- if (!chunk_)
- return true;
- std::string dump_base_name = StringPrintf(
- "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
- TraceEventMemoryOverhead overhead;
- chunk_->EstimateTraceMemoryOverhead(&overhead);
- overhead.DumpInto(dump_base_name.c_str(), pmd);
- return true;
-}
-
-void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
- if (!chunk_)
- return;
-
- trace_log_->lock_.AssertAcquired();
- if (trace_log_->CheckGeneration(generation_)) {
- // Return the chunk to the buffer only if the generation matches.
- trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
- }
- // Otherwise this method may be called from the destructor, or TraceLog will
- // find the generation mismatch and delete this buffer soon.
-}
-
-TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
-}
-
-TraceLogStatus::~TraceLogStatus() {
-}
-
-// static
-TraceLog* TraceLog::GetInstance() {
- return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
-}
-
-TraceLog::TraceLog()
- : mode_(DISABLED),
- num_traces_recorded_(0),
- event_callback_(0),
- dispatching_to_observer_list_(false),
- process_sort_index_(0),
- process_id_hash_(0),
- process_id_(0),
- watch_category_(0),
- trace_options_(kInternalRecordUntilFull),
- sampling_thread_handle_(0),
- trace_config_(TraceConfig()),
- event_callback_trace_config_(TraceConfig()),
- thread_shared_chunk_index_(0),
- generation_(0),
- use_worker_thread_(false) {
- // Trace is enabled or disabled on one thread while other threads are
- // accessing the enabled flag. We don't care whether edge-case events are
- // traced or not, so we allow races on the enabled flag to keep the trace
- // macros fast.
- // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
- // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
- // sizeof(g_category_group_enabled),
- // "trace_event category enabled");
- for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
- ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
- "trace_event category enabled");
- }
-#if defined(OS_NACL) // NaCl shouldn't expose the process id.
- SetProcessID(0);
-#else
- SetProcessID(static_cast<int>(GetCurrentProcId()));
-
- // NaCl also shouldn't access the command line.
- if (CommandLine::InitializedForCurrentProcess() &&
- CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
- std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kTraceToConsole);
- if (filter.empty()) {
- filter = kEchoToConsoleCategoryFilter;
- } else {
- filter.append(",");
- filter.append(kEchoToConsoleCategoryFilter);
- }
-
- LOG(ERROR) << "Start " << switches::kTraceToConsole
- << " with CategoryFilter '" << filter << "'.";
- SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
- }
-#endif
-
- logged_events_.reset(CreateTraceBuffer());
-
- MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
-}
-
-TraceLog::~TraceLog() {
-}
-
-void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
- // A ThreadLocalEventBuffer needs the message loop
- // - to know when the thread exits;
- // - to handle the final flush.
- // For a thread without a message loop or the message loop may be blocked, the
- // trace events will be added into the main buffer directly.
- if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
- return;
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
- if (thread_local_event_buffer &&
- !CheckGeneration(thread_local_event_buffer->generation())) {
- delete thread_local_event_buffer;
- thread_local_event_buffer = NULL;
- }
- if (!thread_local_event_buffer) {
- thread_local_event_buffer = new ThreadLocalEventBuffer(this);
- thread_local_event_buffer_.Set(thread_local_event_buffer);
- }
-}
-
-bool TraceLog::OnMemoryDump(ProcessMemoryDump* pmd) {
- TraceEventMemoryOverhead overhead;
- overhead.Add("TraceLog", sizeof(*this));
- {
- AutoLock lock(lock_);
- if (logged_events_)
- logged_events_->EstimateTraceMemoryOverhead(&overhead);
- }
- overhead.AddSelf();
- overhead.DumpInto("tracing/main_trace_log", pmd);
- return true;
-}
-
-const unsigned char* TraceLog::GetCategoryGroupEnabled(
- const char* category_group) {
- TraceLog* tracelog = GetInstance();
- if (!tracelog) {
- DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
- return &g_category_group_enabled[g_category_already_shutdown];
- }
- return tracelog->GetCategoryGroupEnabledInternal(category_group);
-}
-
-const char* TraceLog::GetCategoryGroupName(
- const unsigned char* category_group_enabled) {
- // Calculate the index of the category group by finding
- // category_group_enabled in g_category_group_enabled array.
- uintptr_t category_begin =
- reinterpret_cast<uintptr_t>(g_category_group_enabled);
- uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
- DCHECK(category_ptr >= category_begin &&
- category_ptr < reinterpret_cast<uintptr_t>(
- g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
- "out of bounds category pointer";
- uintptr_t category_index =
- (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
- return g_category_groups[category_index];
-}
-
-void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
- unsigned char enabled_flag = 0;
- const char* category_group = g_category_groups[category_index];
- if (mode_ == RECORDING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group))
- enabled_flag |= ENABLED_FOR_RECORDING;
- else if (mode_ == MONITORING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group))
- enabled_flag |= ENABLED_FOR_MONITORING;
- if (event_callback_ &&
- event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
- enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
-#if defined(OS_WIN)
- if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- enabled_flag |= ENABLED_FOR_ETW_EXPORT;
- }
-#endif
-
- g_category_group_enabled[category_index] = enabled_flag;
-}
-
-void TraceLog::UpdateCategoryGroupEnabledFlags() {
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = 0; i < category_index; i++)
- UpdateCategoryGroupEnabledFlag(i);
-}
-
-void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
- ResetTraceEventSyntheticDelays();
- const TraceConfig::StringList& delays =
- trace_config_.GetSyntheticDelayValues();
- TraceConfig::StringList::const_iterator ci;
- for (ci = delays.begin(); ci != delays.end(); ++ci) {
- StringTokenizer tokens(*ci, ";");
- if (!tokens.GetNext())
- continue;
- TraceEventSyntheticDelay* delay =
- TraceEventSyntheticDelay::Lookup(tokens.token());
- while (tokens.GetNext()) {
- std::string token = tokens.token();
- char* duration_end;
- double target_duration = strtod(token.c_str(), &duration_end);
- if (duration_end != token.c_str()) {
- delay->SetTargetDuration(TimeDelta::FromMicroseconds(
- static_cast<int64>(target_duration * 1e6)));
- } else if (token == "static") {
- delay->SetMode(TraceEventSyntheticDelay::STATIC);
- } else if (token == "oneshot") {
- delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
- } else if (token == "alternating") {
- delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
- }
- }
- }
-}
-
-const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
- const char* category_group) {
- DCHECK(!strchr(category_group, '"')) <<
- "Category groups may not contain double quote";
- // The g_category_groups is append only, avoid using a lock for the fast path.
- size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
-
- // Search for pre-existing category group.
- for (size_t i = 0; i < current_category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- unsigned char* category_group_enabled = NULL;
- // This is the slow path: the lock is not held in the case above, so more
- // than one thread could have reached here trying to add the same category.
- // Only hold to lock when actually appending a new category, and
- // check the categories groups again.
- AutoLock lock(lock_);
- size_t category_index = base::subtle::Acquire_Load(&g_category_index);
- for (size_t i = 0; i < category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- // Create a new category group.
- DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
- "must increase MAX_CATEGORY_GROUPS";
- if (category_index < MAX_CATEGORY_GROUPS) {
- // Don't hold on to the category_group pointer, so that we can create
- // category groups with strings not known at compile time (this is
- // required by SetWatchEvent).
- const char* new_group = strdup(category_group);
- ANNOTATE_LEAKING_OBJECT_PTR(new_group);
- g_category_groups[category_index] = new_group;
- DCHECK(!g_category_group_enabled[category_index]);
- // Note that if both included and excluded patterns in the
- // TraceConfig are empty, we exclude nothing,
- // thereby enabling this category group.
- UpdateCategoryGroupEnabledFlag(category_index);
- category_group_enabled = &g_category_group_enabled[category_index];
- // Update the max index now.
- base::subtle::Release_Store(&g_category_index, category_index + 1);
- } else {
- category_group_enabled =
- &g_category_group_enabled[g_category_categories_exhausted];
- }
- return category_group_enabled;
-}
-
-void TraceLog::GetKnownCategoryGroups(
- std::vector<std::string>* category_groups) {
- AutoLock lock(lock_);
- category_groups->push_back(
- g_category_groups[g_category_trace_event_overhead]);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = g_num_builtin_categories; i < category_index; i++)
- category_groups->push_back(g_category_groups[i]);
-}
-
-void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
- std::vector<EnabledStateObserver*> observer_list;
- {
- AutoLock lock(lock_);
-
- // Can't enable tracing when Flush() is in progress.
- DCHECK(!flush_task_runner_);
-
- InternalTraceOptions new_options =
- GetInternalOptionsFromTraceConfig(trace_config);
-
- InternalTraceOptions old_options = trace_options();
-
- if (IsEnabled()) {
- if (new_options != old_options) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different "
- << "set of options.";
- }
-
- if (mode != mode_) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
- }
-
- trace_config_.Merge(trace_config);
- UpdateCategoryGroupEnabledFlags();
- return;
- }
-
- if (dispatching_to_observer_list_) {
- DLOG(ERROR) <<
- "Cannot manipulate TraceLog::Enabled state from an observer.";
- return;
- }
-
- mode_ = mode;
-
- if (new_options != old_options) {
- subtle::NoBarrier_Store(&trace_options_, new_options);
- UseNextTraceBuffer();
- }
-
- num_traces_recorded_++;
-
- trace_config_ = TraceConfig(trace_config);
- UpdateCategoryGroupEnabledFlags();
- UpdateSyntheticDelaysFromTraceConfig();
-
- if (new_options & kInternalEnableSampling) {
- sampling_thread_.reset(new TraceSamplingThread);
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[0],
- "bucket0",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[1],
- "bucket1",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[2],
- "bucket2",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- if (!PlatformThread::Create(
- 0, sampling_thread_.get(), &sampling_thread_handle_)) {
- DCHECK(false) << "failed to create thread";
- }
- }
-
- dispatching_to_observer_list_ = true;
- observer_list = enabled_state_observer_list_;
- }
- // Notify observers outside the lock in case they trigger trace events.
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogEnabled();
-
- {
- AutoLock lock(lock_);
- dispatching_to_observer_list_ = false;
- }
-}
-
-void TraceLog::SetArgumentFilterPredicate(
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
- AutoLock lock(lock_);
- DCHECK(!argument_filter_predicate.is_null());
- DCHECK(argument_filter_predicate_.is_null());
- argument_filter_predicate_ = argument_filter_predicate;
-}
-
-TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
- const TraceConfig& config) {
- InternalTraceOptions ret =
- config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
- if (config.IsArgumentFilterEnabled())
- ret |= kInternalEnableArgumentFilter;
- switch (config.GetTraceRecordMode()) {
- case RECORD_UNTIL_FULL:
- return ret | kInternalRecordUntilFull;
- case RECORD_CONTINUOUSLY:
- return ret | kInternalRecordContinuously;
- case ECHO_TO_CONSOLE:
- return ret | kInternalEchoToConsole;
- case RECORD_AS_MUCH_AS_POSSIBLE:
- return ret | kInternalRecordAsMuchAsPossible;
- }
- NOTREACHED();
- return kInternalNone;
-}
-
-TraceConfig TraceLog::GetCurrentTraceConfig() const {
- AutoLock lock(lock_);
- return trace_config_;
-}
-
-void TraceLog::SetDisabled() {
- AutoLock lock(lock_);
- SetDisabledWhileLocked();
-}
-
-void TraceLog::SetDisabledWhileLocked() {
- lock_.AssertAcquired();
-
- if (!IsEnabled())
- return;
-
- if (dispatching_to_observer_list_) {
- DLOG(ERROR)
- << "Cannot manipulate TraceLog::Enabled state from an observer.";
- return;
- }
-
- mode_ = DISABLED;
-
- if (sampling_thread_.get()) {
- // Stop the sampling thread.
- sampling_thread_->Stop();
- lock_.Release();
- PlatformThread::Join(sampling_thread_handle_);
- lock_.Acquire();
- sampling_thread_handle_ = PlatformThreadHandle();
- sampling_thread_.reset();
- }
-
- trace_config_.Clear();
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- UpdateCategoryGroupEnabledFlags();
- AddMetadataEventsWhileLocked();
-
- dispatching_to_observer_list_ = true;
- std::vector<EnabledStateObserver*> observer_list =
- enabled_state_observer_list_;
-
- {
- // Dispatch to observers outside the lock in case the observer triggers a
- // trace event.
- AutoUnlock unlock(lock_);
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogDisabled();
- }
- dispatching_to_observer_list_ = false;
-}
-
-int TraceLog::GetNumTracesRecorded() {
- AutoLock lock(lock_);
- if (!IsEnabled())
- return -1;
- return num_traces_recorded_;
-}
-
-void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
- enabled_state_observer_list_.push_back(listener);
-}
-
-void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
- std::vector<EnabledStateObserver*>::iterator it =
- std::find(enabled_state_observer_list_.begin(),
- enabled_state_observer_list_.end(),
- listener);
- if (it != enabled_state_observer_list_.end())
- enabled_state_observer_list_.erase(it);
-}
-
-bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
- std::vector<EnabledStateObserver*>::const_iterator it =
- std::find(enabled_state_observer_list_.begin(),
- enabled_state_observer_list_.end(),
- listener);
- return it != enabled_state_observer_list_.end();
-}
-
-TraceLogStatus TraceLog::GetStatus() const {
- AutoLock lock(lock_);
- TraceLogStatus result;
- result.event_capacity = logged_events_->Capacity();
- result.event_count = logged_events_->Size();
- return result;
-}
-
-bool TraceLog::BufferIsFull() const {
- AutoLock lock(lock_);
- return logged_events_->IsFull();
-}
-
-TraceBuffer* TraceLog::CreateTraceBuffer() {
- InternalTraceOptions options = trace_options();
- if (options & kInternalRecordContinuously)
- return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
- else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
- return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
- else if (options & kInternalEchoToConsole)
- return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
- else if (options & kInternalRecordAsMuchAsPossible)
- return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
- return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
-}
-
-TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
- return new TraceBufferVector(max_chunks);
-}
-
-TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
- TraceEventHandle* handle, bool check_buffer_is_full) {
- lock_.AssertAcquired();
-
- if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
- logged_events_->ReturnChunk(thread_shared_chunk_index_,
- thread_shared_chunk_.Pass());
- }
-
- if (!thread_shared_chunk_) {
- thread_shared_chunk_ = logged_events_->GetChunk(
- &thread_shared_chunk_index_);
- if (check_buffer_is_full)
- CheckIfBufferIsFullWhileLocked();
- }
- if (!thread_shared_chunk_)
- return NULL;
-
- size_t event_index;
- TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
- if (trace_event && handle) {
- MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
- event_index, handle);
- }
- return trace_event;
-}
-
-void TraceLog::CheckIfBufferIsFullWhileLocked() {
- lock_.AssertAcquired();
- if (logged_events_->IsFull()) {
- if (buffer_limit_reached_timestamp_.is_null()) {
- buffer_limit_reached_timestamp_ = OffsetNow();
- }
- SetDisabledWhileLocked();
- }
-}
-
-void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb) {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_,
- reinterpret_cast<subtle::AtomicWord>(cb));
- event_callback_trace_config_ = trace_config;
- UpdateCategoryGroupEnabledFlags();
-};
-
-void TraceLog::SetEventCallbackDisabled() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_, 0);
- UpdateCategoryGroupEnabledFlags();
-}
-
-// Flush() works as the following:
-// 1. Flush() is called in thread A whose task runner is saved in
-// flush_task_runner_;
-// 2. If thread_message_loops_ is not empty, thread A posts task to each message
-// loop to flush the thread local buffers; otherwise finish the flush;
-// 3. FlushCurrentThread() deletes the thread local event buffer:
-// - The last batch of events of the thread are flushed into the main buffer;
-// - The message loop will be removed from thread_message_loops_;
-// If this is the last message loop, finish the flush;
-// 4. If any thread hasn't finish its flush in time, finish the flush.
-void TraceLog::Flush(const TraceLog::OutputCallback& cb,
- bool use_worker_thread) {
- FlushInternal(cb, use_worker_thread, false);
-}
-
-void TraceLog::CancelTracing(const OutputCallback& cb) {
- SetDisabled();
- FlushInternal(cb, false, true);
-}
-
-void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
- bool use_worker_thread,
- bool discard_events) {
- use_worker_thread_ = use_worker_thread;
- if (IsEnabled()) {
- // Can't flush when tracing is enabled because otherwise PostTask would
- // - generate more trace events;
- // - deschedule the calling thread on some platforms causing inaccurate
- // timing of the trace events.
- scoped_refptr<RefCountedString> empty_result = new RefCountedString;
- if (!cb.is_null())
- cb.Run(empty_result, false);
- LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
- return;
- }
-
- int generation = this->generation();
- // Copy of thread_message_loops_ to be used without locking.
- std::vector<scoped_refptr<SingleThreadTaskRunner> >
- thread_message_loop_task_runners;
- {
- AutoLock lock(lock_);
- DCHECK(!flush_task_runner_);
- flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
- ? ThreadTaskRunnerHandle::Get()
- : nullptr;
- DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
- flush_output_callback_ = cb;
-
- if (thread_shared_chunk_) {
- logged_events_->ReturnChunk(thread_shared_chunk_index_,
- thread_shared_chunk_.Pass());
- }
-
- if (thread_message_loops_.size()) {
- for (hash_set<MessageLoop*>::const_iterator it =
- thread_message_loops_.begin();
- it != thread_message_loops_.end(); ++it) {
- thread_message_loop_task_runners.push_back((*it)->task_runner());
- }
- }
- }
-
- if (thread_message_loop_task_runners.size()) {
- for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
- thread_message_loop_task_runners[i]->PostTask(
- FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
- generation, discard_events));
- }
- flush_task_runner_->PostDelayedTask(
- FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
- discard_events),
- TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
- return;
- }
-
- FinishFlush(generation, discard_events);
-}
-
-// Usually it runs on a different thread.
-void TraceLog::ConvertTraceEventsToTraceFormat(
- scoped_ptr<TraceBuffer> logged_events,
- const OutputCallback& flush_output_callback,
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
- if (flush_output_callback.is_null())
- return;
-
- // The callback need to be called at least once even if there is no events
- // to let the caller know the completion of flush.
- scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
- while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
- for (size_t j = 0; j < chunk->size(); ++j) {
- size_t size = json_events_str_ptr->size();
- if (size > kTraceEventBufferSizeInBytes) {
- flush_output_callback.Run(json_events_str_ptr, true);
- json_events_str_ptr = new RefCountedString();
- } else if (size) {
- json_events_str_ptr->data().append(",\n");
- }
- chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
- argument_filter_predicate);
- }
- }
- flush_output_callback.Run(json_events_str_ptr, false);
-}
-
-void TraceLog::FinishFlush(int generation, bool discard_events) {
- scoped_ptr<TraceBuffer> previous_logged_events;
- OutputCallback flush_output_callback;
- TraceEvent::ArgumentFilterPredicate argument_filter_predicate;
-
- if (!CheckGeneration(generation))
- return;
-
- {
- AutoLock lock(lock_);
-
- previous_logged_events.swap(logged_events_);
- UseNextTraceBuffer();
- thread_message_loops_.clear();
-
- flush_task_runner_ = NULL;
- flush_output_callback = flush_output_callback_;
- flush_output_callback_.Reset();
-
- if (trace_options() & kInternalEnableArgumentFilter) {
- CHECK(!argument_filter_predicate_.is_null());
- argument_filter_predicate = argument_filter_predicate_;
- }
- }
-
- if (discard_events) {
- if (!flush_output_callback.is_null()) {
- scoped_refptr<RefCountedString> empty_result = new RefCountedString;
- flush_output_callback.Run(empty_result, false);
- }
- return;
- }
-
- if (use_worker_thread_ &&
- WorkerPool::PostTask(
- FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
- Passed(&previous_logged_events),
- flush_output_callback, argument_filter_predicate),
- true)) {
- return;
- }
-
- ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
- flush_output_callback,
- argument_filter_predicate);
-}
-
-// Run in each thread holding a local event buffer.
-void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
- {
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_) {
- // This is late. The corresponding flush has finished.
- return;
- }
- }
-
- // This will flush the thread local buffer.
- delete thread_local_event_buffer_.Get();
-
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_ ||
- thread_message_loops_.size())
- return;
-
- flush_task_runner_->PostTask(
- FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
- discard_events));
-}
-
-void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
- {
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_) {
- // Flush has finished before timeout.
- return;
- }
-
- LOG(WARNING) <<
- "The following threads haven't finished flush in time. "
- "If this happens stably for some thread, please call "
- "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
- "the thread to avoid its trace events from being lost.";
- for (hash_set<MessageLoop*>::const_iterator it =
- thread_message_loops_.begin();
- it != thread_message_loops_.end(); ++it) {
- LOG(WARNING) << "Thread: " << (*it)->thread_name();
- }
- }
- FinishFlush(generation, discard_events);
-}
-
-void TraceLog::FlushButLeaveBufferIntact(
- const TraceLog::OutputCallback& flush_output_callback) {
- scoped_ptr<TraceBuffer> previous_logged_events;
- TraceEvent::ArgumentFilterPredicate argument_filter_predicate;
- {
- AutoLock lock(lock_);
- AddMetadataEventsWhileLocked();
- if (thread_shared_chunk_) {
- // Return the chunk to the main buffer to flush the sampling data.
- logged_events_->ReturnChunk(thread_shared_chunk_index_,
- thread_shared_chunk_.Pass());
- }
- previous_logged_events = logged_events_->CloneForIteration().Pass();
-
- if (trace_options() & kInternalEnableArgumentFilter) {
- CHECK(!argument_filter_predicate_.is_null());
- argument_filter_predicate = argument_filter_predicate_;
- }
- } // release lock
-
- ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
- flush_output_callback,
- argument_filter_predicate);
-}
-
-void TraceLog::UseNextTraceBuffer() {
- logged_events_.reset(CreateTraceBuffer());
- subtle::NoBarrier_AtomicIncrement(&generation_, 1);
- thread_shared_chunk_.reset();
- thread_shared_chunk_index_ = 0;
-}
-
-TraceEventHandle TraceLog::AddTraceEvent(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags) {
- int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TraceTicks now = base::TraceTicks::Now();
- return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
- name, id, thread_id, now,
- num_args, arg_names,
- arg_types, arg_values,
- convertable_values, flags);
-}
-
-TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int thread_id,
- const TraceTicks& timestamp,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags) {
- TraceEventHandle handle = { 0, 0, 0 };
- if (!*category_group_enabled)
- return handle;
-
- // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
- // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
- // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
- if (thread_is_in_trace_event_.Get())
- return handle;
-
- AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
-
- DCHECK(name);
- DCHECK(!timestamp.is_null());
-
- if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
- id = MangleEventId(id);
-
- TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
- TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
- OffsetNow() : offset_event_timestamp;
- ThreadTicks thread_now = ThreadNow();
-
- // |thread_local_event_buffer_| can be null if the current thread doesn't have
- // a message loop or the message loop is blocked.
- InitializeThreadLocalEventBufferIfSupported();
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
-
- // Check and update the current thread name only if the event is for the
- // current thread to avoid locks in most cases.
- if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
- const char* new_name = ThreadIdNameManager::GetInstance()->
- GetName(thread_id);
- // Check if the thread name has been set or changed since the previous
- // call (if any), but don't bother if the new name is empty. Note this will
- // not detect a thread name change within the same char* buffer address: we
- // favor common case performance over corner case correctness.
- if (new_name != g_current_thread_name.Get().Get() &&
- new_name && *new_name) {
- g_current_thread_name.Get().Set(new_name);
-
- AutoLock thread_info_lock(thread_info_lock_);
-
- hash_map<int, std::string>::iterator existing_name =
- thread_names_.find(thread_id);
- if (existing_name == thread_names_.end()) {
- // This is a new thread id, and a new name.
- thread_names_[thread_id] = new_name;
- } else {
- // This is a thread id that we've seen before, but potentially with a
- // new name.
- std::vector<StringPiece> existing_names =
- base::SplitStringPiece(existing_name->second, ",",
- base::KEEP_WHITESPACE,
- base::SPLIT_WANT_NONEMPTY);
- bool found = std::find(existing_names.begin(),
- existing_names.end(),
- new_name) != existing_names.end();
- if (!found) {
- if (existing_names.size())
- existing_name->second.push_back(',');
- existing_name->second.append(new_name);
- }
- }
- }
- }
-
-#if defined(OS_WIN)
- // This is done sooner rather than later, to avoid creating the event and
- // acquiring the lock, which is not needed for ETW as it's already threadsafe.
- if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
- TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- convertable_values);
-#endif // OS_WIN
-
- std::string console_message;
- if (*category_group_enabled &
- (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
- OptionalAutoLock lock(&lock_);
-
- TraceEvent* trace_event = NULL;
- if (thread_local_event_buffer) {
- trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
- } else {
- lock.EnsureAcquired();
- trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
- }
-
- if (trace_event) {
- trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
- phase, category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- convertable_values, flags);
-
-#if defined(OS_ANDROID)
- trace_event->SendToATrace();
-#endif
- }
-
- if (trace_options() & kInternalEchoToConsole) {
- console_message = EventToConsoleMessage(
- phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
- timestamp, trace_event);
- }
- }
-
- if (console_message.size())
- LOG(ERROR) << console_message;
-
- if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
- &watch_category_)) == category_group_enabled) {
- bool event_name_matches;
- WatchEventCallback watch_event_callback_copy;
- {
- AutoLock lock(lock_);
- event_name_matches = watch_event_name_ == name;
- watch_event_callback_copy = watch_event_callback_;
- }
- if (event_name_matches) {
- if (!watch_event_callback_copy.is_null())
- watch_event_callback_copy.Run();
- }
- }
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(offset_event_timestamp,
- phase == TRACE_EVENT_PHASE_COMPLETE ?
- TRACE_EVENT_PHASE_BEGIN : phase,
- category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- flags);
- }
- }
-
- if (thread_local_event_buffer)
- thread_local_event_buffer->ReportOverhead(now, thread_now);
-
- return handle;
-}
-
-// May be called when a COMPELETE event ends and the unfinished event has been
-// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
-std::string TraceLog::EventToConsoleMessage(unsigned char phase,
- const TraceTicks& timestamp,
- TraceEvent* trace_event) {
- AutoLock thread_info_lock(thread_info_lock_);
-
- // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
- // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_END.
- DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
-
- TimeDelta duration;
- int thread_id = trace_event ?
- trace_event->thread_id() : PlatformThread::CurrentId();
- if (phase == TRACE_EVENT_PHASE_END) {
- duration = timestamp - thread_event_start_times_[thread_id].top();
- thread_event_start_times_[thread_id].pop();
- }
-
- std::string thread_name = thread_names_[thread_id];
- if (thread_colors_.find(thread_name) == thread_colors_.end())
- thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
-
- std::ostringstream log;
- log << base::StringPrintf("%s: \x1b[0;3%dm",
- thread_name.c_str(),
- thread_colors_[thread_name]);
-
- size_t depth = 0;
- if (thread_event_start_times_.find(thread_id) !=
- thread_event_start_times_.end())
- depth = thread_event_start_times_[thread_id].size();
-
- for (size_t i = 0; i < depth; ++i)
- log << "| ";
-
- if (trace_event)
- trace_event->AppendPrettyPrinted(&log);
- if (phase == TRACE_EVENT_PHASE_END)
- log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
-
- log << "\x1b[0;m";
-
- if (phase == TRACE_EVENT_PHASE_BEGIN)
- thread_event_start_times_[thread_id].push(timestamp);
-
- return log.str();
-}
-
-void TraceLog::AddTraceEventEtw(char phase,
- const char* name,
- const void* id,
- const char* extra) {
-#if defined(OS_WIN)
- TraceEventETWProvider::Trace(name, phase, id, extra);
-#endif
- INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
- TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
-}
-
-void TraceLog::AddTraceEventEtw(char phase,
- const char* name,
- const void* id,
- const std::string& extra) {
-#if defined(OS_WIN)
- TraceEventETWProvider::Trace(name, phase, id, extra);
-#endif
- INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
- TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
-}
-
-void TraceLog::UpdateTraceEventDuration(
- const unsigned char* category_group_enabled,
- const char* name,
- TraceEventHandle handle) {
- // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
- // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
- // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
- if (thread_is_in_trace_event_.Get())
- return;
-
- AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
-
- ThreadTicks thread_now = ThreadNow();
- TraceTicks now = OffsetNow();
-
- std::string console_message;
- if (*category_group_enabled & ENABLED_FOR_RECORDING) {
- OptionalAutoLock lock(&lock_);
-
- TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
- if (trace_event) {
- DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
- trace_event->UpdateDuration(now, thread_now);
-#if defined(OS_ANDROID)
- trace_event->SendToATrace();
-#endif
- }
-
- if (trace_options() & kInternalEchoToConsole) {
- console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
- now, trace_event);
- }
- }
-
- if (console_message.size())
- LOG(ERROR) << console_message;
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
- trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
- TRACE_EVENT_FLAG_NONE);
- }
- }
-}
-
-void TraceLog::SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback) {
- const unsigned char* category = GetCategoryGroupEnabled(
- category_name.c_str());
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_,
- reinterpret_cast<subtle::AtomicWord>(category));
- watch_event_name_ = event_name;
- watch_event_callback_ = callback;
-}
-
-void TraceLog::CancelWatchEvent() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- watch_event_callback_.Reset();
-}
-
-uint64 TraceLog::MangleEventId(uint64 id) {
- return id ^ process_id_hash_;
-}
-
-void TraceLog::AddMetadataEventsWhileLocked() {
- lock_.AssertAcquired();
-
-#if !defined(OS_NACL) // NaCl shouldn't expose the process id.
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- 0,
- "num_cpus", "number",
- base::SysInfo::NumberOfProcessors());
-#endif
-
-
- int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- if (process_sort_index_ != 0) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "process_sort_index", "sort_index",
- process_sort_index_);
- }
-
- if (process_name_.size()) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "process_name", "name",
- process_name_);
- }
-
- if (process_labels_.size() > 0) {
- std::vector<std::string> labels;
- for(base::hash_map<int, std::string>::iterator it = process_labels_.begin();
- it != process_labels_.end();
- it++) {
- labels.push_back(it->second);
- }
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "process_labels", "labels",
- JoinString(labels, ','));
- }
-
- // Thread sort indices.
- for(hash_map<int, int>::iterator it = thread_sort_indices_.begin();
- it != thread_sort_indices_.end();
- it++) {
- if (it->second == 0)
- continue;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first,
- "thread_sort_index", "sort_index",
- it->second);
- }
-
- // Thread names.
- AutoLock thread_info_lock(thread_info_lock_);
- for(hash_map<int, std::string>::iterator it = thread_names_.begin();
- it != thread_names_.end();
- it++) {
- if (it->second.empty())
- continue;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first,
- "thread_name", "name",
- it->second);
- }
-
- // If buffer is full, add a metadata record to report this.
- if (!buffer_limit_reached_timestamp_.is_null()) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "trace_buffer_overflowed",
- "overflowed_at_ts",
- buffer_limit_reached_timestamp_);
- }
-}
-
-void TraceLog::WaitSamplingEventForTesting() {
- if (!sampling_thread_)
- return;
- sampling_thread_->WaitSamplingEventForTesting();
-}
-
-void TraceLog::DeleteForTesting() {
- DeleteTraceLogForTesting::Delete();
-}
-
-TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
- return GetEventByHandleInternal(handle, NULL);
-}
-
-TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
- OptionalAutoLock* lock) {
- if (!handle.chunk_seq)
- return NULL;
-
- if (thread_local_event_buffer_.Get()) {
- TraceEvent* trace_event =
- thread_local_event_buffer_.Get()->GetEventByHandle(handle);
- if (trace_event)
- return trace_event;
- }
-
- // The event has been out-of-control of the thread local buffer.
- // Try to get the event from the main buffer with a lock.
- if (lock)
- lock->EnsureAcquired();
-
- if (thread_shared_chunk_ &&
- handle.chunk_index == thread_shared_chunk_index_) {
- return handle.chunk_seq == thread_shared_chunk_->seq() ?
- thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
- }
-
- return logged_events_->GetEventByHandle(handle);
-}
-
-void TraceLog::SetProcessID(int process_id) {
- process_id_ = process_id;
- // Create a FNV hash from the process ID for XORing.
- // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
- unsigned long long offset_basis = 14695981039346656037ull;
- unsigned long long fnv_prime = 1099511628211ull;
- unsigned long long pid = static_cast<unsigned long long>(process_id_);
- process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
-}
-
-void TraceLog::SetProcessSortIndex(int sort_index) {
- AutoLock lock(lock_);
- process_sort_index_ = sort_index;
-}
-
-void TraceLog::SetProcessName(const std::string& process_name) {
- AutoLock lock(lock_);
- process_name_ = process_name;
-}
-
-void TraceLog::UpdateProcessLabel(
- int label_id, const std::string& current_label) {
- if(!current_label.length())
- return RemoveProcessLabel(label_id);
-
- AutoLock lock(lock_);
- process_labels_[label_id] = current_label;
-}
-
-void TraceLog::RemoveProcessLabel(int label_id) {
- AutoLock lock(lock_);
- base::hash_map<int, std::string>::iterator it = process_labels_.find(
- label_id);
- if (it == process_labels_.end())
- return;
-
- process_labels_.erase(it);
-}
-
-void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
- AutoLock lock(lock_);
- thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
-}
-
-void TraceLog::SetTimeOffset(TimeDelta offset) {
- time_offset_ = offset;
-}
-
-size_t TraceLog::GetObserverCountForTest() const {
- return enabled_state_observer_list_.size();
-}
-
-void TraceLog::SetCurrentThreadBlocksMessageLoop() {
- thread_blocks_message_loop_.Set(true);
- if (thread_local_event_buffer_.Get()) {
- // This will flush the thread local buffer.
- delete thread_local_event_buffer_.Get();
- }
-}
-
-void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) {
- overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
-}
-
} // namespace trace_event
} // namespace base
-
-namespace trace_event_internal {
-
-ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
- const char* category_group, const char* name) {
- // The single atom works because for now the category_group can only be "gpu".
- DCHECK_EQ(strcmp(category_group, "gpu"), 0);
- static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
- category_group, atomic, category_group_enabled_);
- name_ = name;
- if (*category_group_enabled_) {
- event_handle_ =
- TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
- trace_event_internal::kNoEventId,
- static_cast<int>(base::PlatformThread::CurrentId()),
- base::TraceTicks::Now(), 0, NULL, NULL, NULL, NULL,
- TRACE_EVENT_FLAG_NONE);
- }
-}
-
-ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
- if (*category_group_enabled_) {
- TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
- name_, event_handle_);
- }
-}
-
-} // namespace trace_event_internal
diff --git a/chromium/base/trace_event/trace_event_impl.h b/chromium/base/trace_event/trace_event_impl.h
index ccb850033e1..f26e13f14b0 100644
--- a/chromium/base/trace_event/trace_event_impl.h
+++ b/chromium/base/trace_event/trace_event_impl.h
@@ -14,9 +14,7 @@
#include "base/base_export.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
-#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted_memory.h"
-#include "base/memory/scoped_vector.h"
#include "base/observer_list.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
@@ -24,31 +22,8 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_memory_overhead.h"
-// Older style trace macros with explicit id and extra data
-// Only these macros result in publishing data to ETW as currently implemented.
-// TODO(georgesak): Update/replace these with new ETW macros.
-#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
- base::trace_event::TraceLog::AddTraceEventEtw( \
- TRACE_EVENT_PHASE_BEGIN, \
- name, reinterpret_cast<const void*>(id), extra)
-
-#define TRACE_EVENT_END_ETW(name, id, extra) \
- base::trace_event::TraceLog::AddTraceEventEtw( \
- TRACE_EVENT_PHASE_END, \
- name, reinterpret_cast<const void*>(id), extra)
-
-#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
- base::trace_event::TraceLog::AddTraceEventEtw( \
- TRACE_EVENT_PHASE_INSTANT, \
- name, reinterpret_cast<const void*>(id), extra)
-
-template <typename Type>
-struct DefaultSingletonTraits;
-
namespace base {
class WaitableEvent;
@@ -56,6 +31,9 @@ class MessageLoop;
namespace trace_event {
+typedef base::Callback<bool(const char* category_group_name,
+ const char* event_name)> ArgumentFilterPredicate;
+
// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
@@ -82,14 +60,17 @@ class BASE_EXPORT ConvertableToTraceFormat
friend class RefCounted<ConvertableToTraceFormat>;
};
+const int kTraceMaxNumArgs = 2;
+
struct TraceEventHandle {
uint32 chunk_seq;
- uint16 chunk_index;
- uint16 event_index;
+ // These numbers of bits must be kept consistent with
+ // TraceBufferChunk::kMaxTrunkIndex and
+ // TraceBufferChunk::kTraceBufferChunkSize (in trace_buffer.h).
+ unsigned chunk_index : 26;
+ unsigned event_index : 6;
};
-const int kTraceMaxNumArgs = 2;
-
class BASE_EXPORT TraceEvent {
public:
union TraceValue {
@@ -116,22 +97,22 @@ class BASE_EXPORT TraceEvent {
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
+ unsigned long long context_id,
+ unsigned long long bind_id,
int num_args,
const char** arg_names,
const unsigned char* arg_types,
const unsigned long long* arg_values,
const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags);
+ unsigned int flags);
void Reset();
void UpdateDuration(const TraceTicks& now, const ThreadTicks& thread_now);
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead*);
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
// Serialize event data to JSON
- typedef base::Callback<bool(const char* category_group_name,
- const char* event_name)> ArgumentFilterPredicate;
void AppendAsJSON(
std::string* out,
const ArgumentFilterPredicate& argument_filter_predicate) const;
@@ -148,7 +129,8 @@ class BASE_EXPORT TraceEvent {
TimeDelta duration() const { return duration_; }
TimeDelta thread_duration() const { return thread_duration_; }
unsigned long long id() const { return id_; }
- unsigned char flags() const { return flags_; }
+ unsigned long long context_id() const { return context_id_; }
+ unsigned int flags() const { return flags_; }
// Exposed for unittesting:
@@ -174,7 +156,8 @@ class BASE_EXPORT TraceEvent {
TimeDelta thread_duration_;
// id_ can be used to store phase-specific data.
unsigned long long id_;
- scoped_ptr<TraceEventMemoryOverhead> cached_memory_overhead_estimate_;
+ // context_id_ is used to store context information.
+ unsigned long long context_id_;
TraceValue arg_values_[kTraceMaxNumArgs];
const char* arg_names_[kTraceMaxNumArgs];
scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
@@ -183,532 +166,13 @@ class BASE_EXPORT TraceEvent {
scoped_refptr<base::RefCountedString> parameter_copy_storage_;
int thread_id_;
char phase_;
- unsigned char flags_;
+ unsigned int flags_;
+ unsigned long long bind_id_;
unsigned char arg_types_[kTraceMaxNumArgs];
DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};
-// TraceBufferChunk is the basic unit of TraceBuffer.
-class BASE_EXPORT TraceBufferChunk {
- public:
- explicit TraceBufferChunk(uint32 seq);
- ~TraceBufferChunk();
-
- void Reset(uint32 new_seq);
- TraceEvent* AddTraceEvent(size_t* event_index);
- bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
-
- uint32 seq() const { return seq_; }
- size_t capacity() const { return kTraceBufferChunkSize; }
- size_t size() const { return next_free_; }
-
- TraceEvent* GetEventAt(size_t index) {
- DCHECK(index < size());
- return &chunk_[index];
- }
- const TraceEvent* GetEventAt(size_t index) const {
- DCHECK(index < size());
- return &chunk_[index];
- }
-
- scoped_ptr<TraceBufferChunk> Clone() const;
-
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
-
- static const size_t kTraceBufferChunkSize = 64;
-
- private:
- size_t next_free_;
- scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_when_full_;
- TraceEvent chunk_[kTraceBufferChunkSize];
- uint32 seq_;
-};
-
-// TraceBuffer holds the events as they are collected.
-class BASE_EXPORT TraceBuffer {
- public:
- virtual ~TraceBuffer() {}
-
- virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t *index) = 0;
- virtual void ReturnChunk(size_t index,
- scoped_ptr<TraceBufferChunk> chunk) = 0;
-
- virtual bool IsFull() const = 0;
- virtual size_t Size() const = 0;
- virtual size_t Capacity() const = 0;
- virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
-
- // For iteration. Each TraceBuffer can only be iterated once.
- virtual const TraceBufferChunk* NextChunk() = 0;
-
- virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
-
- // Computes an estimate of the size of the buffer, including all the retained
- // objects.
- virtual void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) = 0;
-};
-
-// TraceResultBuffer collects and converts trace fragments returned by TraceLog
-// to JSON output.
-class BASE_EXPORT TraceResultBuffer {
- public:
- typedef base::Callback<void(const std::string&)> OutputCallback;
-
- // If you don't need to stream JSON chunks out efficiently, and just want to
- // get a complete JSON string after calling Finish, use this struct to collect
- // JSON trace output.
- struct BASE_EXPORT SimpleOutput {
- OutputCallback GetCallback();
- void Append(const std::string& json_string);
-
- // Do what you want with the json_output_ string after calling
- // TraceResultBuffer::Finish.
- std::string json_output;
- };
-
- TraceResultBuffer();
- ~TraceResultBuffer();
-
- // Set callback. The callback will be called during Start with the initial
- // JSON output and during AddFragment and Finish with following JSON output
- // chunks. The callback target must live past the last calls to
- // TraceResultBuffer::Start/AddFragment/Finish.
- void SetOutputCallback(const OutputCallback& json_chunk_callback);
-
- // Start JSON output. This resets all internal state, so you can reuse
- // the TraceResultBuffer by calling Start.
- void Start();
-
- // Call AddFragment 0 or more times to add trace fragments from TraceLog.
- void AddFragment(const std::string& trace_fragment);
-
- // When all fragments have been added, call Finish to complete the JSON
- // formatted output.
- void Finish();
-
- private:
- OutputCallback output_callback_;
- bool append_comma_;
-};
-
-class TraceSamplingThread;
-
-struct BASE_EXPORT TraceLogStatus {
- TraceLogStatus();
- ~TraceLogStatus();
- size_t event_capacity;
- size_t event_count;
-};
-
-class BASE_EXPORT TraceLog : public MemoryDumpProvider {
- public:
- enum Mode {
- DISABLED = 0,
- RECORDING_MODE,
- MONITORING_MODE,
- };
-
- // The pointer returned from GetCategoryGroupEnabledInternal() points to a
- // value with zero or more of the following bits. Used in this class only.
- // The TRACE_EVENT macros should only use the value as a bool.
- // These values must be in sync with macro values in TraceEvent.h in Blink.
- enum CategoryGroupEnabledFlags {
- // Category group enabled for the recording mode.
- ENABLED_FOR_RECORDING = 1 << 0,
- // Category group enabled for the monitoring mode.
- ENABLED_FOR_MONITORING = 1 << 1,
- // Category group enabled by SetEventCallbackEnabled().
- ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
- // Category group enabled to export events to ETW.
- ENABLED_FOR_ETW_EXPORT = 1 << 3
- };
-
- static TraceLog* GetInstance();
-
- // Get set of known category groups. This can change as new code paths are
- // reached. The known category groups are inserted into |category_groups|.
- void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
-
- // Retrieves a copy (for thread-safety) of the current TraceConfig.
- TraceConfig GetCurrentTraceConfig() const;
-
- // Initializes the thread-local event buffer, if not already initialized and
- // if the current thread supports that (has a message loop).
- void InitializeThreadLocalEventBufferIfSupported();
-
- // Enables normal tracing (recording trace events in the trace buffer).
- // See TraceConfig comments for details on how to control what categories
- // will be traced. If tracing has already been enabled, |category_filter| will
- // be merged into the current category filter.
- void SetEnabled(const TraceConfig& trace_config, Mode mode);
-
- // Disables normal tracing for all categories.
- void SetDisabled();
-
- bool IsEnabled() { return mode_ != DISABLED; }
-
- // The number of times we have begun recording traces. If tracing is off,
- // returns -1. If tracing is on, then it returns the number of times we have
- // recorded a trace. By watching for this number to increment, you can
- // passively discover when a new trace has begun. This is then used to
- // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
- int GetNumTracesRecorded();
-
-#if defined(OS_ANDROID)
- void StartATrace();
- void StopATrace();
- void AddClockSyncMetadataEvent();
-#endif
-
- // Enabled state listeners give a callback when tracing is enabled or
- // disabled. This can be used to tie into other library's tracing systems
- // on-demand.
- class BASE_EXPORT EnabledStateObserver {
- public:
- // Called just after the tracing system becomes enabled, outside of the
- // |lock_|. TraceLog::IsEnabled() is true at this point.
- virtual void OnTraceLogEnabled() = 0;
-
- // Called just after the tracing system disables, outside of the |lock_|.
- // TraceLog::IsEnabled() is false at this point.
- virtual void OnTraceLogDisabled() = 0;
- };
- void AddEnabledStateObserver(EnabledStateObserver* listener);
- void RemoveEnabledStateObserver(EnabledStateObserver* listener);
- bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
-
- TraceLogStatus GetStatus() const;
- bool BufferIsFull() const;
-
- // Computes an estimate of the size of the TraceLog including all the retained
- // objects.
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
-
- // Not using base::Callback because of its limited by 7 parameters.
- // Also, using primitive type allows directly passing callback from WebCore.
- // WARNING: It is possible for the previously set callback to be called
- // after a call to SetEventCallbackEnabled() that replaces or a call to
- // SetEventCallbackDisabled() that disables the callback.
- // This callback may be invoked on any thread.
- // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
- // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
- // interface simple.
- typedef void (*EventCallback)(TraceTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned char flags);
-
- // Enable tracing for EventCallback.
- void SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb);
- void SetEventCallbackDisabled();
- void SetArgumentFilterPredicate(
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate);
-
- // Flush all collected events to the given output callback. The callback will
- // be called one or more times either synchronously or asynchronously from
- // the current thread with IPC-bite-size chunks. The string format is
- // undefined. Use TraceResultBuffer to convert one or more trace strings to
- // JSON. The callback can be null if the caller doesn't want any data.
- // Due to the implementation of thread-local buffers, flush can't be
- // done when tracing is enabled. If called when tracing is enabled, the
- // callback will be called directly with (empty_string, false) to indicate
- // the end of this unsuccessful flush. Flush does the serialization
- // on the same thread if the caller doesn't set use_worker_thread explicitly.
- typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
- bool has_more_events)> OutputCallback;
- void Flush(const OutputCallback& cb, bool use_worker_thread = false);
- void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
-
- // Cancels tracing and discards collected data.
- void CancelTracing(const OutputCallback& cb);
-
- // Called by TRACE_EVENT* macros, don't call this directly.
- // The name parameter is a category group for example:
- // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
- static const unsigned char* GetCategoryGroupEnabled(const char* name);
- static const char* GetCategoryGroupName(
- const unsigned char* category_group_enabled);
-
- // Called by TRACE_EVENT* macros, don't call this directly.
- // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
- // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
- TraceEventHandle AddTraceEvent(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags);
- TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int thread_id,
- const TraceTicks& timestamp,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned char flags);
- static void AddTraceEventEtw(char phase,
- const char* category_group,
- const void* id,
- const char* extra);
- static void AddTraceEventEtw(char phase,
- const char* category_group,
- const void* id,
- const std::string& extra);
-
- void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
- const char* name,
- TraceEventHandle handle);
-
- // For every matching event, the callback will be called.
- typedef base::Callback<void()> WatchEventCallback;
- void SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback);
- // Cancel the watch event. If tracing is enabled, this may race with the
- // watch event notification firing.
- void CancelWatchEvent();
-
- int process_id() const { return process_id_; }
-
- uint64 MangleEventId(uint64 id);
-
- // Exposed for unittesting:
-
- void WaitSamplingEventForTesting();
-
- // Allows deleting our singleton instance.
- static void DeleteForTesting();
-
- // Allow tests to inspect TraceEvents.
- TraceEvent* GetEventByHandle(TraceEventHandle handle);
-
- void SetProcessID(int process_id);
-
- // Process sort indices, if set, override the order of a process will appear
- // relative to other processes in the trace viewer. Processes are sorted first
- // on their sort index, ascending, then by their name, and then tid.
- void SetProcessSortIndex(int sort_index);
-
- // Sets the name of the process.
- void SetProcessName(const std::string& process_name);
-
- // Processes can have labels in addition to their names. Use labels, for
- // instance, to list out the web page titles that a process is handling.
- void UpdateProcessLabel(int label_id, const std::string& current_label);
- void RemoveProcessLabel(int label_id);
-
- // Thread sort indices, if set, override the order of a thread will appear
- // within its process in the trace viewer. Threads are sorted first on their
- // sort index, ascending, then by their name, and then tid.
- void SetThreadSortIndex(PlatformThreadId , int sort_index);
-
- // Allow setting an offset between the current TraceTicks time and the time
- // that should be reported.
- void SetTimeOffset(TimeDelta offset);
-
- size_t GetObserverCountForTest() const;
-
- // Call this method if the current thread may block the message loop to
- // prevent the thread from using the thread-local buffer because the thread
- // may not handle the flush request in time causing lost of unflushed events.
- void SetCurrentThreadBlocksMessageLoop();
-
- private:
- typedef unsigned int InternalTraceOptions;
-
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferRingBufferGetReturnChunk);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferRingBufferHalfIteration);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferRingBufferFullIteration);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferVectorReportFull);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- ConvertTraceConfigToInternalOptions);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceRecordAsMuchAsPossibleMode);
-
- // This allows constructor and destructor to be private and usable only
- // by the Singleton class.
- friend struct DefaultSingletonTraits<TraceLog>;
-
- // MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
-
- // Enable/disable each category group based on the current mode_,
- // category_filter_, event_callback_ and event_callback_category_filter_.
- // Enable the category group in the enabled mode if category_filter_ matches
- // the category group, or event_callback_ is not null and
- // event_callback_category_filter_ matches the category group.
- void UpdateCategoryGroupEnabledFlags();
- void UpdateCategoryGroupEnabledFlag(size_t category_index);
-
- // Configure synthetic delays based on the values set in the current
- // trace config.
- void UpdateSyntheticDelaysFromTraceConfig();
-
- InternalTraceOptions GetInternalOptionsFromTraceConfig(
- const TraceConfig& config);
-
- class ThreadLocalEventBuffer;
- class OptionalAutoLock;
-
- TraceLog();
- ~TraceLog() override;
- const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
- void AddMetadataEventsWhileLocked();
-
- InternalTraceOptions trace_options() const {
- return static_cast<InternalTraceOptions>(
- subtle::NoBarrier_Load(&trace_options_));
- }
-
- TraceBuffer* trace_buffer() const { return logged_events_.get(); }
- TraceBuffer* CreateTraceBuffer();
- TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
-
- std::string EventToConsoleMessage(unsigned char phase,
- const TraceTicks& timestamp,
- TraceEvent* trace_event);
-
- TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
- bool check_buffer_is_full);
- void CheckIfBufferIsFullWhileLocked();
- void SetDisabledWhileLocked();
-
- TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
- OptionalAutoLock* lock);
-
- void FlushInternal(const OutputCallback& cb,
- bool use_worker_thread,
- bool discard_events);
-
- // |generation| is used in the following callbacks to check if the callback
- // is called for the flush of the current |logged_events_|.
- void FlushCurrentThread(int generation, bool discard_events);
- // Usually it runs on a different thread.
- static void ConvertTraceEventsToTraceFormat(
- scoped_ptr<TraceBuffer> logged_events,
- const TraceLog::OutputCallback& flush_output_callback,
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate);
- void FinishFlush(int generation, bool discard_events);
- void OnFlushTimeout(int generation, bool discard_events);
-
- int generation() const {
- return static_cast<int>(subtle::NoBarrier_Load(&generation_));
- }
- bool CheckGeneration(int generation) const {
- return generation == this->generation();
- }
- void UseNextTraceBuffer();
-
- TraceTicks OffsetNow() const {
- return OffsetTimestamp(TraceTicks::Now());
- }
- TraceTicks OffsetTimestamp(const TraceTicks& timestamp) const {
- return timestamp - time_offset_;
- }
-
- // Internal representation of trace options since we store the currently used
- // trace option as an AtomicWord.
- static const InternalTraceOptions kInternalNone;
- static const InternalTraceOptions kInternalRecordUntilFull;
- static const InternalTraceOptions kInternalRecordContinuously;
- static const InternalTraceOptions kInternalEchoToConsole;
- static const InternalTraceOptions kInternalEnableSampling;
- static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
- static const InternalTraceOptions kInternalEnableArgumentFilter;
-
- // This lock protects TraceLog member accesses (except for members protected
- // by thread_info_lock_) from arbitrary threads.
- mutable Lock lock_;
- // This lock protects accesses to thread_names_, thread_event_start_times_
- // and thread_colors_.
- Lock thread_info_lock_;
- Mode mode_;
- int num_traces_recorded_;
- scoped_ptr<TraceBuffer> logged_events_;
- subtle::AtomicWord /* EventCallback */ event_callback_;
- bool dispatching_to_observer_list_;
- std::vector<EnabledStateObserver*> enabled_state_observer_list_;
-
- std::string process_name_;
- base::hash_map<int, std::string> process_labels_;
- int process_sort_index_;
- base::hash_map<int, int> thread_sort_indices_;
- base::hash_map<int, std::string> thread_names_;
-
- // The following two maps are used only when ECHO_TO_CONSOLE.
- base::hash_map<int, std::stack<TraceTicks> > thread_event_start_times_;
- base::hash_map<std::string, int> thread_colors_;
-
- TraceTicks buffer_limit_reached_timestamp_;
-
- // XORed with TraceID to make it unlikely to collide with other processes.
- unsigned long long process_id_hash_;
-
- int process_id_;
-
- TimeDelta time_offset_;
-
- // Allow tests to wake up when certain events occur.
- WatchEventCallback watch_event_callback_;
- subtle::AtomicWord /* const unsigned char* */ watch_category_;
- std::string watch_event_name_;
-
- subtle::AtomicWord /* Options */ trace_options_;
-
- // Sampling thread handles.
- scoped_ptr<TraceSamplingThread> sampling_thread_;
- PlatformThreadHandle sampling_thread_handle_;
-
- TraceConfig trace_config_;
- TraceConfig event_callback_trace_config_;
-
- ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
- ThreadLocalBoolean thread_blocks_message_loop_;
- ThreadLocalBoolean thread_is_in_trace_event_;
-
- // Contains the message loops of threads that have had at least one event
- // added into the local event buffer. Not using SingleThreadTaskRunner
- // because we need to know the life time of the message loops.
- hash_set<MessageLoop*> thread_message_loops_;
-
- // For events which can't be added into the thread local buffer, e.g. events
- // from threads without a message loop.
- scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
- size_t thread_shared_chunk_index_;
-
- // Set when asynchronous Flush is in progress.
- OutputCallback flush_output_callback_;
- scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
- TraceEvent::ArgumentFilterPredicate argument_filter_predicate_;
- subtle::AtomicWord generation_;
- bool use_worker_thread_;
-
- DISALLOW_COPY_AND_ASSIGN(TraceLog);
-};
-
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/trace_event_memory.h b/chromium/base/trace_event/trace_event_memory.h
index e2b3ae93060..7088080de9b 100644
--- a/chromium/base/trace_event/trace_event_memory.h
+++ b/chromium/base/trace_event/trace_event_memory.h
@@ -10,7 +10,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/timer/timer.h"
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
// TODO(jamescook): Windows support for memory tracing.
#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \
@@ -43,7 +43,7 @@ class BASE_EXPORT TraceMemoryController
HeapProfilerStartFunction heap_profiler_start_function,
HeapProfilerStopFunction heap_profiler_stop_function,
GetHeapProfileFunction get_heap_profile_function);
- virtual ~TraceMemoryController();
+ ~TraceMemoryController() override;
// base::trace_event::TraceLog::EnabledStateChangedObserver overrides:
void OnTraceLogEnabled() override;
@@ -73,7 +73,7 @@ class BASE_EXPORT TraceMemoryController
GetHeapProfileFunction get_heap_profile_function_;
// Timer to schedule memory profile dumps.
- RepeatingTimer<TraceMemoryController> dump_timer_;
+ RepeatingTimer dump_timer_;
WeakPtrFactory<TraceMemoryController> weak_factory_;
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index 0cc3d59890c..ba7207d6163 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -6,18 +6,13 @@
#include <algorithm>
+#include "base/bits.h"
#include "base/memory/ref_counted_memory.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/values.h"
-namespace {
-size_t RoundUp(size_t size, size_t alignment) {
- return (size + alignment - 1) & ~(alignment - 1);
-}
-} // namespace
-
namespace base {
namespace trace_event {
@@ -61,9 +56,9 @@ void TraceEventMemoryOverhead::AddString(const std::string& str) {
// The number below are empirical and mainly based on profiling of real-world
// std::string implementations:
// - even short string end up malloc()-inc at least 32 bytes.
- // - longer stings seem to malloc() multiples of 16 bytes.
- Add("std::string",
- sizeof(std::string) + std::max<size_t>(RoundUp(str.capacity(), 16), 32u));
+ // - longer strings seem to malloc() multiples of 16 bytes.
+ const size_t capacity = bits::Align(str.capacity(), 16);
+ Add("std::string", sizeof(std::string) + std::max<size_t>(capacity, 32u));
}
void TraceEventMemoryOverhead::AddRefCountedString(
@@ -127,6 +122,13 @@ void TraceEventMemoryOverhead::AddSelf() {
Add("TraceEventMemoryOverhead", estimated_size);
}
+size_t TraceEventMemoryOverhead::GetCount(const char* object_type) const {
+ const auto& it = allocated_objects_.find(object_type);
+ if (it == allocated_objects_.end())
+ return 0u;
+ return it->second.count;
+}
+
void TraceEventMemoryOverhead::Update(const TraceEventMemoryOverhead& other) {
for (const auto& it : other.allocated_objects_) {
AddOrCreateInternal(it.first, it.second.count,
@@ -145,7 +147,7 @@ void TraceEventMemoryOverhead::DumpInto(const char* base_name,
it.second.allocated_size_in_bytes);
mad->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
it.second.resident_size_in_bytes);
- mad->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ mad->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, it.second.count);
}
}
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.h b/chromium/base/trace_event/trace_event_memory_overhead.h
index 8ecf12dc969..25853d11d12 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.h
+++ b/chromium/base/trace_event/trace_event_memory_overhead.h
@@ -43,6 +43,9 @@ class BASE_EXPORT TraceEventMemoryOverhead {
// this TraceEventMemoryOverhead instance itself.
void AddSelf();
+ // Retrieves the count, that is, the count of Add*(|object_type|, ...) calls.
+ size_t GetCount(const char* object_type) const;
+
// Adds up and merges all the values from |other| to this instance.
void Update(const TraceEventMemoryOverhead& other);
diff --git a/chromium/base/trace_event/trace_event_synthetic_delay.cc b/chromium/base/trace_event/trace_event_synthetic_delay.cc
index bad79ccbc8c..cd0c364d5bf 100644
--- a/chromium/base/trace_event/trace_event_synthetic_delay.cc
+++ b/chromium/base/trace_event/trace_event_synthetic_delay.cc
@@ -24,7 +24,7 @@ class TraceEventSyntheticDelayRegistry : public TraceEventSyntheticDelayClock {
void ResetAllDelays();
// TraceEventSyntheticDelayClock implementation.
- base::TimeTicks Now() override;
+ TimeTicks Now() override;
private:
TraceEventSyntheticDelayRegistry();
@@ -34,7 +34,7 @@ class TraceEventSyntheticDelayRegistry : public TraceEventSyntheticDelayClock {
Lock lock_;
TraceEventSyntheticDelay delays_[kMaxSyntheticDelays];
TraceEventSyntheticDelay dummy_delay_;
- base::subtle::Atomic32 delay_count_;
+ subtle::Atomic32 delay_count_;
DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayRegistry);
};
@@ -57,8 +57,7 @@ void TraceEventSyntheticDelay::Initialize(
clock_ = clock;
}
-void TraceEventSyntheticDelay::SetTargetDuration(
- base::TimeDelta target_duration) {
+void TraceEventSyntheticDelay::SetTargetDuration(TimeDelta target_duration) {
AutoLock lock(lock_);
target_duration_ = target_duration;
trigger_count_ = 0;
@@ -85,7 +84,7 @@ void TraceEventSyntheticDelay::Begin() {
if (!target_duration_.ToInternalValue())
return;
- base::TimeTicks start_time = clock_->Now();
+ TimeTicks start_time = clock_->Now();
{
AutoLock lock(lock_);
if (++begin_count_ != 1)
@@ -94,15 +93,15 @@ void TraceEventSyntheticDelay::Begin() {
}
}
-void TraceEventSyntheticDelay::BeginParallel(base::TimeTicks* out_end_time) {
+void TraceEventSyntheticDelay::BeginParallel(TimeTicks* out_end_time) {
// See note in Begin().
ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
if (!target_duration_.ToInternalValue()) {
- *out_end_time = base::TimeTicks();
+ *out_end_time = TimeTicks();
return;
}
- base::TimeTicks start_time = clock_->Now();
+ TimeTicks start_time = clock_->Now();
{
AutoLock lock(lock_);
*out_end_time = CalculateEndTimeLocked(start_time);
@@ -115,7 +114,7 @@ void TraceEventSyntheticDelay::End() {
if (!target_duration_.ToInternalValue())
return;
- base::TimeTicks end_time;
+ TimeTicks end_time;
{
AutoLock lock(lock_);
if (!begin_count_ || --begin_count_ != 0)
@@ -126,21 +125,21 @@ void TraceEventSyntheticDelay::End() {
ApplyDelay(end_time);
}
-void TraceEventSyntheticDelay::EndParallel(base::TimeTicks end_time) {
+void TraceEventSyntheticDelay::EndParallel(TimeTicks end_time) {
if (!end_time.is_null())
ApplyDelay(end_time);
}
-base::TimeTicks TraceEventSyntheticDelay::CalculateEndTimeLocked(
- base::TimeTicks start_time) {
+TimeTicks TraceEventSyntheticDelay::CalculateEndTimeLocked(
+ TimeTicks start_time) {
if (mode_ == ONE_SHOT && trigger_count_++)
- return base::TimeTicks();
+ return TimeTicks();
else if (mode_ == ALTERNATING && trigger_count_++ % 2)
- return base::TimeTicks();
+ return TimeTicks();
return start_time + target_duration_;
}
-void TraceEventSyntheticDelay::ApplyDelay(base::TimeTicks end_time) {
+void TraceEventSyntheticDelay::ApplyDelay(TimeTicks end_time) {
TRACE_EVENT0("synthetic_delay", name_.c_str());
while (clock_->Now() < end_time) {
// Busy loop.
@@ -161,14 +160,14 @@ TraceEventSyntheticDelay* TraceEventSyntheticDelayRegistry::GetOrCreateDelay(
const char* name) {
// Try to find an existing delay first without locking to make the common case
// fast.
- int delay_count = base::subtle::Acquire_Load(&delay_count_);
+ int delay_count = subtle::Acquire_Load(&delay_count_);
for (int i = 0; i < delay_count; ++i) {
if (!strcmp(name, delays_[i].name_.c_str()))
return &delays_[i];
}
AutoLock lock(lock_);
- delay_count = base::subtle::Acquire_Load(&delay_count_);
+ delay_count = subtle::Acquire_Load(&delay_count_);
for (int i = 0; i < delay_count; ++i) {
if (!strcmp(name, delays_[i].name_.c_str()))
return &delays_[i];
@@ -180,19 +179,19 @@ TraceEventSyntheticDelay* TraceEventSyntheticDelayRegistry::GetOrCreateDelay(
return &dummy_delay_;
delays_[delay_count].Initialize(std::string(name), this);
- base::subtle::Release_Store(&delay_count_, delay_count + 1);
+ subtle::Release_Store(&delay_count_, delay_count + 1);
return &delays_[delay_count];
}
-base::TimeTicks TraceEventSyntheticDelayRegistry::Now() {
- return base::TimeTicks::Now();
+TimeTicks TraceEventSyntheticDelayRegistry::Now() {
+ return TimeTicks::Now();
}
void TraceEventSyntheticDelayRegistry::ResetAllDelays() {
AutoLock lock(lock_);
- int delay_count = base::subtle::Acquire_Load(&delay_count_);
+ int delay_count = subtle::Acquire_Load(&delay_count_);
for (int i = 0; i < delay_count; ++i) {
- delays_[i].SetTargetDuration(base::TimeDelta());
+ delays_[i].SetTargetDuration(TimeDelta());
delays_[i].SetClock(this);
}
}
diff --git a/chromium/base/trace_event/trace_event_system_stats_monitor.h b/chromium/base/trace_event/trace_event_system_stats_monitor.h
index 051669a35f5..0ae1f487a39 100644
--- a/chromium/base/trace_event/trace_event_system_stats_monitor.h
+++ b/chromium/base/trace_event/trace_event_system_stats_monitor.h
@@ -11,7 +11,7 @@
#include "base/memory/weak_ptr.h"
#include "base/process/process_metrics.h"
#include "base/timer/timer.h"
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
namespace base {
@@ -33,7 +33,7 @@ class BASE_EXPORT TraceEventSystemStatsMonitor
explicit TraceEventSystemStatsMonitor(
scoped_refptr<SingleThreadTaskRunner> task_runner);
- virtual ~TraceEventSystemStatsMonitor();
+ ~TraceEventSystemStatsMonitor() override;
// base::trace_event::TraceLog::EnabledStateChangedObserver overrides:
void OnTraceLogEnabled() override;
@@ -56,7 +56,7 @@ class BASE_EXPORT TraceEventSystemStatsMonitor
scoped_refptr<SingleThreadTaskRunner> task_runner_;
// Timer to schedule system profile dumps.
- RepeatingTimer<TraceEventSystemStatsMonitor> dump_timer_;
+ RepeatingTimer dump_timer_;
WeakPtrFactory<TraceEventSystemStatsMonitor> weak_factory_;
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index bb9f689907b..5020eec4ae7 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -21,6 +21,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/values.h"
@@ -1021,7 +1022,7 @@ class AfterStateChangeEnabledStateObserver
: public TraceLog::EnabledStateObserver {
public:
AfterStateChangeEnabledStateObserver() {}
- virtual ~AfterStateChangeEnabledStateObserver() {}
+ ~AfterStateChangeEnabledStateObserver() override {}
// TraceLog::EnabledStateObserver overrides:
void OnTraceLogEnabled() override {
@@ -1052,7 +1053,7 @@ class SelfRemovingEnabledStateObserver
: public TraceLog::EnabledStateObserver {
public:
SelfRemovingEnabledStateObserver() {}
- virtual ~SelfRemovingEnabledStateObserver() {}
+ ~SelfRemovingEnabledStateObserver() override {}
// TraceLog::EnabledStateObserver overrides:
void OnTraceLogEnabled() override {}
@@ -1424,11 +1425,13 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
TraceEventHandle handle1 =
trace_event_internal::AddTraceEvent(
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
+ trace_event_internal::kNoId,
"arg1", std::string("argval"), "arg2", std::string("argval"));
// Test that static TRACE_STR_COPY string arguments are copied.
TraceEventHandle handle2 =
trace_event_internal::AddTraceEvent(
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
+ trace_event_internal::kNoId,
"arg1", TRACE_STR_COPY("argval"),
"arg2", TRACE_STR_COPY("argval"));
EXPECT_GT(tracer->GetStatus().event_count, 1u);
@@ -1451,6 +1454,7 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
TraceEventHandle handle1 =
trace_event_internal::AddTraceEvent(
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
+ trace_event_internal::kNoId,
"arg1", "argval", "arg2", "argval");
// Test that static TRACE_STR_COPY NULL string arguments are not copied.
const char* str1 = NULL;
@@ -1458,6 +1462,7 @@ TEST_F(TraceEventTestFixture, StaticStringVsString) {
TraceEventHandle handle2 =
trace_event_internal::AddTraceEvent(
TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
+ trace_event_internal::kNoId,
"arg1", TRACE_STR_COPY(str1),
"arg2", TRACE_STR_COPY(str2));
EXPECT_GT(tracer->GetStatus().event_count, 1u);
@@ -1549,7 +1554,7 @@ TEST_F(TraceEventTestFixture, ThreadNames) {
for (int i = 0; i < kNumThreads; i++) {
task_complete_events[i] = new WaitableEvent(false, false);
threads[i]->Start();
- thread_ids[i] = threads[i]->thread_id();
+ thread_ids[i] = threads[i]->GetThreadId();
threads[i]->task_runner()->PostTask(
FROM_HERE, base::Bind(&TraceManyInstantEvents, i, kNumEvents,
task_complete_events[i]));
@@ -2340,7 +2345,7 @@ class TraceEventCallbackTest : public TraceEventTestFixture {
const char* const arg_names[],
const unsigned char arg_types[],
const unsigned long long arg_values[],
- unsigned char flags) {
+ unsigned int flags) {
s_instance->collected_events_phases_.push_back(phase);
s_instance->collected_events_categories_.push_back(
TraceLog::GetCategoryGroupName(category_group_enabled));
@@ -2514,7 +2519,7 @@ TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
trace_log->SetEnabled(
TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
trace_log->logged_events_.reset(
- trace_log->CreateTraceBufferVectorOfSize(100));
+ TraceBuffer::CreateTraceBufferVectorOfSize(100));
do {
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
"all", "with_timestamp", 0, 0, TraceTicks::Now().ToInternalValue());
diff --git a/chromium/base/trace_event/trace_event_win.h b/chromium/base/trace_event/trace_event_win.h
index 41613615549..e64be4da2d2 100644
--- a/chromium/base/trace_event/trace_event_win.h
+++ b/chromium/base/trace_event/trace_event_win.h
@@ -12,11 +12,11 @@
#include "base/trace_event/trace_event.h"
#include "base/win/event_trace_provider.h"
-// Fwd.
+namespace base {
+
template <typename Type>
struct StaticMemorySingletonTraits;
-namespace base {
namespace trace_event {
// This EtwTraceProvider subclass implements ETW logging
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
new file mode 100644
index 00000000000..af7cf440686
--- /dev/null
+++ b/chromium/base/trace_event/trace_log.cc
@@ -0,0 +1,1728 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_log.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/memory/singleton.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/worker_pool.h"
+#include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_profiler_allocation_context.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+#include "base/trace_event/trace_log.h"
+#include "base/trace_event/trace_sampling_thread.h"
+
+#if defined(OS_WIN)
+#include "base/trace_event/trace_event_etw_export_win.h"
+#include "base/trace_event/trace_event_win.h"
+#endif
+
+// The thread buckets for the sampling profiler.
+BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+namespace base {
+namespace internal {
+
+class DeleteTraceLogForTesting {
+ public:
+ static void Delete() {
+ Singleton<trace_event::TraceLog,
+ LeakySingletonTraits<trace_event::TraceLog>>::OnExit(0);
+ }
+};
+
+} // namespace internal
+
+namespace trace_event {
+
+namespace {
+
+// Controls the number of trace events we will buffer in-memory
+// before throwing them away.
+const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
+
+const size_t kTraceEventVectorBigBufferChunks =
+ 512000000 / kTraceBufferChunkSize;
+static_assert(
+ kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
+ "Too many big buffer chunks");
+const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
+static_assert(
+ kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
+ "Too many vector buffer chunks");
+const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
+
+// Can store results for 30 seconds with 1 ms sampling interval.
+const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
+// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
+const size_t kEchoToConsoleTraceEventBufferChunks = 256;
+
+const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
+const int kThreadFlushTimeoutMs = 3000;
+
+#if !defined(OS_NACL)
+// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
+const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
+#endif
+
+#define MAX_CATEGORY_GROUPS 100
+
+// Parallel arrays g_category_groups and g_category_group_enabled are separate
+// so that a pointer to a member of g_category_group_enabled can be easily
+// converted to an index into g_category_groups. This allows macros to deal
+// only with char enabled pointers from g_category_group_enabled, and we can
+// convert internally to determine the category name from the char enabled
+// pointer.
+const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+ "toplevel",
+ "tracing already shutdown",
+ "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+ "__metadata"};
+
+// The enabled flag is char instead of bool so that the API can be used from C.
+unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+// Indexes here have to match the g_category_groups array indexes above.
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
+const int g_category_metadata = 3;
+const int g_num_builtin_categories = 4;
+// Skip default categories.
+base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
+
+// The name of the current thread. This is used to decide if the current
+// thread name has changed. We combine all the seen thread names into the
+// output name for the thread.
+LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
+ LAZY_INSTANCE_INITIALIZER;
+
+ThreadTicks ThreadNow() {
+ return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
+}
+
+template <typename T>
+void InitializeMetadataEvent(TraceEvent* trace_event,
+ int thread_id,
+ const char* metadata_name,
+ const char* arg_name,
+ const T& value) {
+ if (!trace_event)
+ return;
+
+ int num_args = 1;
+ unsigned char arg_type;
+ unsigned long long arg_value;
+ ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
+ trace_event->Initialize(
+ thread_id,
+ TraceTicks(),
+ ThreadTicks(),
+ TRACE_EVENT_PHASE_METADATA,
+ &g_category_group_enabled[g_category_metadata],
+ metadata_name,
+ trace_event_internal::kNoId, // id
+ trace_event_internal::kNoId, // context_id
+ trace_event_internal::kNoId, // bind_id
+ num_args,
+ &arg_name,
+ &arg_type,
+ &arg_value,
+ nullptr,
+ TRACE_EVENT_FLAG_NONE);
+}
+
+class AutoThreadLocalBoolean {
+ public:
+ explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
+ : thread_local_boolean_(thread_local_boolean) {
+ DCHECK(!thread_local_boolean_->Get());
+ thread_local_boolean_->Set(true);
+ }
+ ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
+
+ private:
+ ThreadLocalBoolean* thread_local_boolean_;
+ DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
+};
+
+// Use this function instead of TraceEventHandle constructor to keep the
+// overhead of ScopedTracer (trace_event.h) constructor minimum.
+void MakeHandle(uint32 chunk_seq,
+ size_t chunk_index,
+ size_t event_index,
+ TraceEventHandle* handle) {
+ DCHECK(chunk_seq);
+ DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
+ DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
+ handle->chunk_seq = chunk_seq;
+ handle->chunk_index = static_cast<uint16>(chunk_index);
+ handle->event_index = static_cast<uint16>(event_index);
+}
+
+} // namespace
+
+// A helper class that allows the lock to be acquired in the middle of the scope
+// and unlocks at the end of scope if locked.
+class TraceLog::OptionalAutoLock {
+ public:
+ explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
+
+ ~OptionalAutoLock() {
+ if (locked_)
+ lock_->Release();
+ }
+
+ void EnsureAcquired() {
+ if (!locked_) {
+ lock_->Acquire();
+ locked_ = true;
+ }
+ }
+
+ private:
+ Lock* lock_;
+ bool locked_;
+ DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
+};
+
+class TraceLog::ThreadLocalEventBuffer
+ : public MessageLoop::DestructionObserver,
+ public MemoryDumpProvider {
+ public:
+ ThreadLocalEventBuffer(TraceLog* trace_log);
+ ~ThreadLocalEventBuffer() override;
+
+ TraceEvent* AddTraceEvent(TraceEventHandle* handle);
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) {
+ if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
+ handle.chunk_index != chunk_index_)
+ return NULL;
+
+ return chunk_->GetEventAt(handle.event_index);
+ }
+
+ int generation() const { return generation_; }
+
+ private:
+ // MessageLoop::DestructionObserver
+ void WillDestroyCurrentMessageLoop() override;
+
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
+
+ void FlushWhileLocked();
+
+ void CheckThisIsCurrentBuffer() const {
+ DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+ }
+
+ // Since TraceLog is a leaky singleton, trace_log_ will always be valid
+ // as long as the thread exists.
+ TraceLog* trace_log_;
+ scoped_ptr<TraceBufferChunk> chunk_;
+ size_t chunk_index_;
+ int generation_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
+};
+
+TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
+ : trace_log_(trace_log),
+ chunk_index_(0),
+ generation_(trace_log->generation()) {
+ // ThreadLocalEventBuffer is created only if the thread has a message loop, so
+ // the following message_loop won't be NULL.
+ MessageLoop* message_loop = MessageLoop::current();
+ message_loop->AddDestructionObserver(this);
+
+ // This is to report the local memory usage when memory-infra is enabled.
+ MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, ThreadTaskRunnerHandle::Get());
+
+ AutoLock lock(trace_log->lock_);
+ trace_log->thread_message_loops_.insert(message_loop);
+}
+
+TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
+ CheckThisIsCurrentBuffer();
+ MessageLoop::current()->RemoveDestructionObserver(this);
+ MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+
+ {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked();
+ trace_log_->thread_message_loops_.erase(MessageLoop::current());
+ }
+ trace_log_->thread_local_event_buffer_.Set(NULL);
+}
+
+TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
+ TraceEventHandle* handle) {
+ CheckThisIsCurrentBuffer();
+
+ if (chunk_ && chunk_->IsFull()) {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked();
+ chunk_.reset();
+ }
+ if (!chunk_) {
+ AutoLock lock(trace_log_->lock_);
+ chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
+ trace_log_->CheckIfBufferIsFullWhileLocked();
+ }
+ if (!chunk_)
+ return NULL;
+
+ size_t event_index;
+ TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
+ if (trace_event && handle)
+ MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
+
+ return trace_event;
+}
+
+void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
+ delete this;
+}
+
+bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
+ if (!chunk_)
+ return true;
+ std::string dump_base_name = StringPrintf(
+ "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
+ TraceEventMemoryOverhead overhead;
+ chunk_->EstimateTraceMemoryOverhead(&overhead);
+ overhead.DumpInto(dump_base_name.c_str(), pmd);
+ return true;
+}
+
+void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
+ if (!chunk_)
+ return;
+
+ trace_log_->lock_.AssertAcquired();
+ if (trace_log_->CheckGeneration(generation_)) {
+ // Return the chunk to the buffer only if the generation matches.
+ trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
+ }
+ // Otherwise this method may be called from the destructor, or TraceLog will
+ // find the generation mismatch and delete this buffer soon.
+}
+
+TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
+
+TraceLogStatus::~TraceLogStatus() {}
+
+// static
+TraceLog* TraceLog::GetInstance() {
+ return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
+}
+
+TraceLog::TraceLog()
+ : mode_(DISABLED),
+ num_traces_recorded_(0),
+ event_callback_(0),
+ dispatching_to_observer_list_(false),
+ process_sort_index_(0),
+ process_id_hash_(0),
+ process_id_(0),
+ watch_category_(0),
+ trace_options_(kInternalRecordUntilFull),
+ sampling_thread_handle_(0),
+ trace_config_(TraceConfig()),
+ event_callback_trace_config_(TraceConfig()),
+ thread_shared_chunk_index_(0),
+ generation_(0),
+ use_worker_thread_(false) {
+ // Trace is enabled or disabled on one thread while other threads are
+ // accessing the enabled flag. We don't care whether edge-case events are
+ // traced or not, so we allow races on the enabled flag to keep the trace
+ // macros fast.
+ // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
+ // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
+ // sizeof(g_category_group_enabled),
+ // "trace_event category enabled");
+ for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+ ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+ "trace_event category enabled");
+ }
+#if defined(OS_NACL) // NaCl shouldn't expose the process id.
+ SetProcessID(0);
+#else
+ SetProcessID(static_cast<int>(GetCurrentProcId()));
+
+ // NaCl also shouldn't access the command line.
+ if (CommandLine::InitializedForCurrentProcess() &&
+ CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
+ std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kTraceToConsole);
+ if (filter.empty()) {
+ filter = kEchoToConsoleCategoryFilter;
+ } else {
+ filter.append(",");
+ filter.append(kEchoToConsoleCategoryFilter);
+ }
+
+ LOG(ERROR) << "Start " << switches::kTraceToConsole
+ << " with CategoryFilter '" << filter << "'.";
+ SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
+ }
+#endif
+
+ logged_events_.reset(CreateTraceBuffer());
+
+ MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
+}
+
+TraceLog::~TraceLog() {}
+
+void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
+ // A ThreadLocalEventBuffer needs the message loop
+ // - to know when the thread exits;
+ // - to handle the final flush.
+ // For a thread without a message loop or the message loop may be blocked, the
+ // trace events will be added into the main buffer directly.
+ if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
+ return;
+ auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ if (thread_local_event_buffer &&
+ !CheckGeneration(thread_local_event_buffer->generation())) {
+ delete thread_local_event_buffer;
+ thread_local_event_buffer = NULL;
+ }
+ if (!thread_local_event_buffer) {
+ thread_local_event_buffer = new ThreadLocalEventBuffer(this);
+ thread_local_event_buffer_.Set(thread_local_event_buffer);
+ }
+}
+
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
+ // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
+ // (crbug.com/499731).
+ TraceEventMemoryOverhead overhead;
+ overhead.Add("TraceLog", sizeof(*this));
+ {
+ AutoLock lock(lock_);
+ if (logged_events_)
+ logged_events_->EstimateTraceMemoryOverhead(&overhead);
+ }
+ overhead.AddSelf();
+ overhead.DumpInto("tracing/main_trace_log", pmd);
+ return true;
+}
+
+const unsigned char* TraceLog::GetCategoryGroupEnabled(
+ const char* category_group) {
+ TraceLog* tracelog = GetInstance();
+ if (!tracelog) {
+ DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+ return &g_category_group_enabled[g_category_already_shutdown];
+ }
+ return tracelog->GetCategoryGroupEnabledInternal(category_group);
+}
+
+const char* TraceLog::GetCategoryGroupName(
+ const unsigned char* category_group_enabled) {
+ // Calculate the index of the category group by finding
+ // category_group_enabled in g_category_group_enabled array.
+ uintptr_t category_begin =
+ reinterpret_cast<uintptr_t>(g_category_group_enabled);
+ uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+ DCHECK(category_ptr >= category_begin &&
+ category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
+ MAX_CATEGORY_GROUPS))
+ << "out of bounds category pointer";
+ uintptr_t category_index =
+ (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+ return g_category_groups[category_index];
+}
+
+void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
+ unsigned char enabled_flag = 0;
+ const char* category_group = g_category_groups[category_index];
+ if (mode_ == RECORDING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_RECORDING;
+ else if (mode_ == MONITORING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_MONITORING;
+ if (event_callback_ &&
+ event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+#if defined(OS_WIN)
+ if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+ category_group)) {
+ enabled_flag |= ENABLED_FOR_ETW_EXPORT;
+ }
+#endif
+
+ g_category_group_enabled[category_index] = enabled_flag;
+}
+
+void TraceLog::UpdateCategoryGroupEnabledFlags() {
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; i++)
+ UpdateCategoryGroupEnabledFlag(i);
+}
+
+void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
+ ResetTraceEventSyntheticDelays();
+ const TraceConfig::StringList& delays =
+ trace_config_.GetSyntheticDelayValues();
+ TraceConfig::StringList::const_iterator ci;
+ for (ci = delays.begin(); ci != delays.end(); ++ci) {
+ StringTokenizer tokens(*ci, ";");
+ if (!tokens.GetNext())
+ continue;
+ TraceEventSyntheticDelay* delay =
+ TraceEventSyntheticDelay::Lookup(tokens.token());
+ while (tokens.GetNext()) {
+ std::string token = tokens.token();
+ char* duration_end;
+ double target_duration = strtod(token.c_str(), &duration_end);
+ if (duration_end != token.c_str()) {
+ delay->SetTargetDuration(TimeDelta::FromMicroseconds(
+ static_cast<int64>(target_duration * 1e6)));
+ } else if (token == "static") {
+ delay->SetMode(TraceEventSyntheticDelay::STATIC);
+ } else if (token == "oneshot") {
+ delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
+ } else if (token == "alternating") {
+ delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
+ }
+ }
+ }
+}
+
+const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
+ const char* category_group) {
+ DCHECK(!strchr(category_group, '"'))
+ << "Category groups may not contain double quote";
+ // The g_category_groups is append only, avoid using a lock for the fast path.
+ size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < current_category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ unsigned char* category_group_enabled = NULL;
+ // This is the slow path: the lock is not held in the case above, so more
+ // than one thread could have reached here trying to add the same category.
+ // Only hold to lock when actually appending a new category, and
+ // check the categories groups again.
+ AutoLock lock(lock_);
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ // Create a new category group.
+ DCHECK(category_index < MAX_CATEGORY_GROUPS)
+ << "must increase MAX_CATEGORY_GROUPS";
+ if (category_index < MAX_CATEGORY_GROUPS) {
+ // Don't hold on to the category_group pointer, so that we can create
+ // category groups with strings not known at compile time (this is
+ // required by SetWatchEvent).
+ const char* new_group = strdup(category_group);
+ ANNOTATE_LEAKING_OBJECT_PTR(new_group);
+ g_category_groups[category_index] = new_group;
+ DCHECK(!g_category_group_enabled[category_index]);
+ // Note that if both included and excluded patterns in the
+ // TraceConfig are empty, we exclude nothing,
+ // thereby enabling this category group.
+ UpdateCategoryGroupEnabledFlag(category_index);
+ category_group_enabled = &g_category_group_enabled[category_index];
+ // Update the max index now.
+ base::subtle::Release_Store(&g_category_index, category_index + 1);
+ } else {
+ category_group_enabled =
+ &g_category_group_enabled[g_category_categories_exhausted];
+ }
+ return category_group_enabled;
+}
+
+void TraceLog::GetKnownCategoryGroups(
+ std::vector<std::string>* category_groups) {
+ AutoLock lock(lock_);
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ for (size_t i = g_num_builtin_categories; i < category_index; i++)
+ category_groups->push_back(g_category_groups[i]);
+}
+
+void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
+ std::vector<EnabledStateObserver*> observer_list;
+ {
+ AutoLock lock(lock_);
+
+ // Can't enable tracing when Flush() is in progress.
+ DCHECK(!flush_task_runner_);
+
+ InternalTraceOptions new_options =
+ GetInternalOptionsFromTraceConfig(trace_config);
+
+ InternalTraceOptions old_options = trace_options();
+
+ if (IsEnabled()) {
+ if (new_options != old_options) {
+ DLOG(ERROR) << "Attempting to re-enable tracing with a different "
+ << "set of options.";
+ }
+
+ if (mode != mode_) {
+ DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
+ }
+
+ trace_config_.Merge(trace_config);
+ UpdateCategoryGroupEnabledFlags();
+ return;
+ }
+
+ if (dispatching_to_observer_list_) {
+ DLOG(ERROR)
+ << "Cannot manipulate TraceLog::Enabled state from an observer.";
+ return;
+ }
+
+ mode_ = mode;
+
+ if (new_options != old_options) {
+ subtle::NoBarrier_Store(&trace_options_, new_options);
+ UseNextTraceBuffer();
+ }
+
+ num_traces_recorded_++;
+
+ trace_config_ = TraceConfig(trace_config);
+ UpdateCategoryGroupEnabledFlags();
+ UpdateSyntheticDelaysFromTraceConfig();
+
+ if (new_options & kInternalEnableSampling) {
+ sampling_thread_.reset(new TraceSamplingThread);
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[0], "bucket0",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[1], "bucket1",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[2], "bucket2",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ if (!PlatformThread::Create(0, sampling_thread_.get(),
+ &sampling_thread_handle_)) {
+ DCHECK(false) << "failed to create thread";
+ }
+ }
+
+ dispatching_to_observer_list_ = true;
+ observer_list = enabled_state_observer_list_;
+ }
+ // Notify observers outside the lock in case they trigger trace events.
+ for (size_t i = 0; i < observer_list.size(); ++i)
+ observer_list[i]->OnTraceLogEnabled();
+
+ {
+ AutoLock lock(lock_);
+ dispatching_to_observer_list_ = false;
+ }
+}
+
+void TraceLog::SetArgumentFilterPredicate(
+ const ArgumentFilterPredicate& argument_filter_predicate) {
+ AutoLock lock(lock_);
+ DCHECK(!argument_filter_predicate.is_null());
+ DCHECK(argument_filter_predicate_.is_null());
+ argument_filter_predicate_ = argument_filter_predicate;
+}
+
+TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
+ const TraceConfig& config) {
+ InternalTraceOptions ret =
+ config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
+ if (config.IsArgumentFilterEnabled())
+ ret |= kInternalEnableArgumentFilter;
+ switch (config.GetTraceRecordMode()) {
+ case RECORD_UNTIL_FULL:
+ return ret | kInternalRecordUntilFull;
+ case RECORD_CONTINUOUSLY:
+ return ret | kInternalRecordContinuously;
+ case ECHO_TO_CONSOLE:
+ return ret | kInternalEchoToConsole;
+ case RECORD_AS_MUCH_AS_POSSIBLE:
+ return ret | kInternalRecordAsMuchAsPossible;
+ }
+ NOTREACHED();
+ return kInternalNone;
+}
+
+TraceConfig TraceLog::GetCurrentTraceConfig() const {
+ AutoLock lock(lock_);
+ return trace_config_;
+}
+
+void TraceLog::SetDisabled() {
+ AutoLock lock(lock_);
+ SetDisabledWhileLocked();
+}
+
+void TraceLog::SetDisabledWhileLocked() {
+ lock_.AssertAcquired();
+
+ if (!IsEnabled())
+ return;
+
+ if (dispatching_to_observer_list_) {
+ DLOG(ERROR)
+ << "Cannot manipulate TraceLog::Enabled state from an observer.";
+ return;
+ }
+
+ mode_ = DISABLED;
+
+ if (sampling_thread_.get()) {
+ // Stop the sampling thread.
+ sampling_thread_->Stop();
+ lock_.Release();
+ PlatformThread::Join(sampling_thread_handle_);
+ lock_.Acquire();
+ sampling_thread_handle_ = PlatformThreadHandle();
+ sampling_thread_.reset();
+ }
+
+ trace_config_.Clear();
+ subtle::NoBarrier_Store(&watch_category_, 0);
+ watch_event_name_ = "";
+ UpdateCategoryGroupEnabledFlags();
+ AddMetadataEventsWhileLocked();
+
+ dispatching_to_observer_list_ = true;
+ std::vector<EnabledStateObserver*> observer_list =
+ enabled_state_observer_list_;
+
+ {
+ // Dispatch to observers outside the lock in case the observer triggers a
+ // trace event.
+ AutoUnlock unlock(lock_);
+ for (size_t i = 0; i < observer_list.size(); ++i)
+ observer_list[i]->OnTraceLogDisabled();
+ }
+ dispatching_to_observer_list_ = false;
+}
+
+int TraceLog::GetNumTracesRecorded() {
+ AutoLock lock(lock_);
+ if (!IsEnabled())
+ return -1;
+ return num_traces_recorded_;
+}
+
+void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
+ AutoLock lock(lock_);
+ enabled_state_observer_list_.push_back(listener);
+}
+
+void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
+ AutoLock lock(lock_);
+ std::vector<EnabledStateObserver*>::iterator it =
+ std::find(enabled_state_observer_list_.begin(),
+ enabled_state_observer_list_.end(), listener);
+ if (it != enabled_state_observer_list_.end())
+ enabled_state_observer_list_.erase(it);
+}
+
+bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
+ AutoLock lock(lock_);
+ std::vector<EnabledStateObserver*>::const_iterator it =
+ std::find(enabled_state_observer_list_.begin(),
+ enabled_state_observer_list_.end(), listener);
+ return it != enabled_state_observer_list_.end();
+}
+
+TraceLogStatus TraceLog::GetStatus() const {
+ AutoLock lock(lock_);
+ TraceLogStatus result;
+ result.event_capacity = logged_events_->Capacity();
+ result.event_count = logged_events_->Size();
+ return result;
+}
+
+bool TraceLog::BufferIsFull() const {
+ AutoLock lock(lock_);
+ return logged_events_->IsFull();
+}
+
+TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
+ TraceEventHandle* handle,
+ bool check_buffer_is_full) {
+ lock_.AssertAcquired();
+
+ if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+
+ if (!thread_shared_chunk_) {
+ thread_shared_chunk_ =
+ logged_events_->GetChunk(&thread_shared_chunk_index_);
+ if (check_buffer_is_full)
+ CheckIfBufferIsFullWhileLocked();
+ }
+ if (!thread_shared_chunk_)
+ return NULL;
+
+ size_t event_index;
+ TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
+ if (trace_event && handle) {
+ MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
+ event_index, handle);
+ }
+ return trace_event;
+}
+
+void TraceLog::CheckIfBufferIsFullWhileLocked() {
+ lock_.AssertAcquired();
+ if (logged_events_->IsFull()) {
+ if (buffer_limit_reached_timestamp_.is_null()) {
+ buffer_limit_reached_timestamp_ = OffsetNow();
+ }
+ SetDisabledWhileLocked();
+ }
+}
+
+void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
+ EventCallback cb) {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_,
+ reinterpret_cast<subtle::AtomicWord>(cb));
+ event_callback_trace_config_ = trace_config;
+ UpdateCategoryGroupEnabledFlags();
+};
+
+void TraceLog::SetEventCallbackDisabled() {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_, 0);
+ UpdateCategoryGroupEnabledFlags();
+}
+
+// Flush() works as the following:
+// 1. Flush() is called in thread A whose task runner is saved in
+// flush_task_runner_;
+// 2. If thread_message_loops_ is not empty, thread A posts task to each message
+// loop to flush the thread local buffers; otherwise finish the flush;
+// 3. FlushCurrentThread() deletes the thread local event buffer:
+// - The last batch of events of the thread are flushed into the main buffer;
+// - The message loop will be removed from thread_message_loops_;
+// If this is the last message loop, finish the flush;
+// 4. If any thread hasn't finish its flush in time, finish the flush.
+void TraceLog::Flush(const TraceLog::OutputCallback& cb,
+ bool use_worker_thread) {
+ FlushInternal(cb, use_worker_thread, false);
+}
+
+void TraceLog::CancelTracing(const OutputCallback& cb) {
+ SetDisabled();
+ FlushInternal(cb, false, true);
+}
+
+void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
+ bool use_worker_thread,
+ bool discard_events) {
+ use_worker_thread_ = use_worker_thread;
+ if (IsEnabled()) {
+ // Can't flush when tracing is enabled because otherwise PostTask would
+ // - generate more trace events;
+ // - deschedule the calling thread on some platforms causing inaccurate
+ // timing of the trace events.
+ scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+ if (!cb.is_null())
+ cb.Run(empty_result, false);
+ LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
+ return;
+ }
+
+ int generation = this->generation();
+ // Copy of thread_message_loops_ to be used without locking.
+ std::vector<scoped_refptr<SingleThreadTaskRunner>>
+ thread_message_loop_task_runners;
+ {
+ AutoLock lock(lock_);
+ DCHECK(!flush_task_runner_);
+ flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
+ ? ThreadTaskRunnerHandle::Get()
+ : nullptr;
+ DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
+ flush_output_callback_ = cb;
+
+ if (thread_shared_chunk_) {
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+
+ if (thread_message_loops_.size()) {
+ for (hash_set<MessageLoop*>::const_iterator it =
+ thread_message_loops_.begin();
+ it != thread_message_loops_.end(); ++it) {
+ thread_message_loop_task_runners.push_back((*it)->task_runner());
+ }
+ }
+ }
+
+ if (thread_message_loop_task_runners.size()) {
+ for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
+ thread_message_loop_task_runners[i]->PostTask(
+ FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
+ generation, discard_events));
+ }
+ flush_task_runner_->PostDelayedTask(
+ FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
+ discard_events),
+ TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
+ return;
+ }
+
+ FinishFlush(generation, discard_events);
+}
+
+// Usually it runs on a different thread.
+void TraceLog::ConvertTraceEventsToTraceFormat(
+ scoped_ptr<TraceBuffer> logged_events,
+ const OutputCallback& flush_output_callback,
+ const ArgumentFilterPredicate& argument_filter_predicate) {
+ if (flush_output_callback.is_null())
+ return;
+
+ // The callback need to be called at least once even if there is no events
+ // to let the caller know the completion of flush.
+ scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
+ while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
+ for (size_t j = 0; j < chunk->size(); ++j) {
+ size_t size = json_events_str_ptr->size();
+ if (size > kTraceEventBufferSizeInBytes) {
+ flush_output_callback.Run(json_events_str_ptr, true);
+ json_events_str_ptr = new RefCountedString();
+ } else if (size) {
+ json_events_str_ptr->data().append(",\n");
+ }
+ chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
+ argument_filter_predicate);
+ }
+ }
+ flush_output_callback.Run(json_events_str_ptr, false);
+}
+
+void TraceLog::FinishFlush(int generation, bool discard_events) {
+ scoped_ptr<TraceBuffer> previous_logged_events;
+ OutputCallback flush_output_callback;
+ ArgumentFilterPredicate argument_filter_predicate;
+
+ if (!CheckGeneration(generation))
+ return;
+
+ {
+ AutoLock lock(lock_);
+
+ previous_logged_events.swap(logged_events_);
+ UseNextTraceBuffer();
+ thread_message_loops_.clear();
+
+ flush_task_runner_ = NULL;
+ flush_output_callback = flush_output_callback_;
+ flush_output_callback_.Reset();
+
+ if (trace_options() & kInternalEnableArgumentFilter) {
+ CHECK(!argument_filter_predicate_.is_null());
+ argument_filter_predicate = argument_filter_predicate_;
+ }
+ }
+
+ if (discard_events) {
+ if (!flush_output_callback.is_null()) {
+ scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+ flush_output_callback.Run(empty_result, false);
+ }
+ return;
+ }
+
+ if (use_worker_thread_ &&
+ WorkerPool::PostTask(
+ FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+ Passed(&previous_logged_events),
+ flush_output_callback, argument_filter_predicate),
+ true)) {
+ return;
+ }
+
+ ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
+ flush_output_callback,
+ argument_filter_predicate);
+}
+
+// Run in each thread holding a local event buffer.
+void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
+ {
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_task_runner_) {
+ // This is late. The corresponding flush has finished.
+ return;
+ }
+ }
+
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
+
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_task_runner_ ||
+ thread_message_loops_.size())
+ return;
+
+ flush_task_runner_->PostTask(
+ FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
+ discard_events));
+}
+
+void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
+ {
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_task_runner_) {
+ // Flush has finished before timeout.
+ return;
+ }
+
+ LOG(WARNING)
+ << "The following threads haven't finished flush in time. "
+ "If this happens stably for some thread, please call "
+ "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
+ "the thread to avoid its trace events from being lost.";
+ for (hash_set<MessageLoop*>::const_iterator it =
+ thread_message_loops_.begin();
+ it != thread_message_loops_.end(); ++it) {
+ LOG(WARNING) << "Thread: " << (*it)->thread_name();
+ }
+ }
+ FinishFlush(generation, discard_events);
+}
+
+void TraceLog::FlushButLeaveBufferIntact(
+ const TraceLog::OutputCallback& flush_output_callback) {
+ scoped_ptr<TraceBuffer> previous_logged_events;
+ ArgumentFilterPredicate argument_filter_predicate;
+ {
+ AutoLock lock(lock_);
+ AddMetadataEventsWhileLocked();
+ if (thread_shared_chunk_) {
+ // Return the chunk to the main buffer to flush the sampling data.
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+ previous_logged_events = logged_events_->CloneForIteration().Pass();
+
+ if (trace_options() & kInternalEnableArgumentFilter) {
+ CHECK(!argument_filter_predicate_.is_null());
+ argument_filter_predicate = argument_filter_predicate_;
+ }
+ } // release lock
+
+ ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
+ flush_output_callback,
+ argument_filter_predicate);
+}
+
+void TraceLog::UseNextTraceBuffer() {
+ logged_events_.reset(CreateTraceBuffer());
+ subtle::NoBarrier_AtomicIncrement(&generation_, 1);
+ thread_shared_chunk_.reset();
+ thread_shared_chunk_index_ = 0;
+}
+
+TraceEventHandle TraceLog::AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TraceTicks now = base::TraceTicks::Now();
+ return AddTraceEventWithThreadIdAndTimestamp(
+ phase,
+ category_group_enabled,
+ name,
+ id,
+ trace_event_internal::kNoId, // context_id
+ trace_event_internal::kNoId, // bind_id
+ thread_id,
+ now,
+ num_args,
+ arg_names,
+ arg_types,
+ arg_values,
+ convertable_values,
+ flags);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithContextId(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TraceTicks now = base::TraceTicks::Now();
+ return AddTraceEventWithThreadIdAndTimestamp(
+ phase,
+ category_group_enabled,
+ name,
+ id,
+ context_id,
+ trace_event_internal::kNoId, // bind_id
+ thread_id,
+ now,
+ num_args,
+ arg_names,
+ arg_types,
+ arg_values,
+ convertable_values,
+ flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
+}
+
+// Handle legacy calls to AddTraceEventWithThreadIdAndTimestamp
+// with kNoId as bind_id
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int thread_id,
+ const TraceTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ return AddTraceEventWithThreadIdAndTimestamp(
+ phase,
+ category_group_enabled,
+ name,
+ id,
+ context_id,
+ trace_event_internal::kNoId, // bind_id
+ thread_id,
+ timestamp,
+ num_args,
+ arg_names,
+ arg_types,
+ arg_values,
+ convertable_values,
+ flags);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ unsigned long long bind_id,
+ int thread_id,
+ const TraceTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ TraceEventHandle handle = {0, 0, 0};
+ if (!*category_group_enabled)
+ return handle;
+
+ // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+ // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+ // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+ if (thread_is_in_trace_event_.Get())
+ return handle;
+
+ AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+ DCHECK(name);
+ DCHECK(!timestamp.is_null());
+
+ if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
+ if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
+ (flags & TRACE_EVENT_FLAG_FLOW_OUT))
+ bind_id = MangleEventId(bind_id);
+ id = MangleEventId(id);
+ }
+
+ TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
+ ThreadTicks thread_now = ThreadNow();
+
+ // |thread_local_event_buffer_| can be null if the current thread doesn't have
+ // a message loop or the message loop is blocked.
+ InitializeThreadLocalEventBufferIfSupported();
+ auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+
+ // Check and update the current thread name only if the event is for the
+ // current thread to avoid locks in most cases.
+ if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
+ const char* new_name =
+ ThreadIdNameManager::GetInstance()->GetName(thread_id);
+ // Check if the thread name has been set or changed since the previous
+ // call (if any), but don't bother if the new name is empty. Note this will
+ // not detect a thread name change within the same char* buffer address: we
+ // favor common case performance over corner case correctness.
+ if (new_name != g_current_thread_name.Get().Get() && new_name &&
+ *new_name) {
+ g_current_thread_name.Get().Set(new_name);
+
+ AutoLock thread_info_lock(thread_info_lock_);
+
+ hash_map<int, std::string>::iterator existing_name =
+ thread_names_.find(thread_id);
+ if (existing_name == thread_names_.end()) {
+ // This is a new thread id, and a new name.
+ thread_names_[thread_id] = new_name;
+ } else {
+ // This is a thread id that we've seen before, but potentially with a
+ // new name.
+ std::vector<StringPiece> existing_names = base::SplitStringPiece(
+ existing_name->second, ",", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ bool found = std::find(existing_names.begin(), existing_names.end(),
+ new_name) != existing_names.end();
+ if (!found) {
+ if (existing_names.size())
+ existing_name->second.push_back(',');
+ existing_name->second.append(new_name);
+ }
+ }
+ }
+ }
+
+#if defined(OS_WIN)
+ // This is done sooner rather than later, to avoid creating the event and
+ // acquiring the lock, which is not needed for ETW as it's already threadsafe.
+ if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
+ TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values,
+ convertable_values);
+#endif // OS_WIN
+
+ std::string console_message;
+ if (*category_group_enabled &
+ (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
+ OptionalAutoLock lock(&lock_);
+
+ TraceEvent* trace_event = NULL;
+ if (thread_local_event_buffer) {
+ trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
+ } else {
+ lock.EnsureAcquired();
+ trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
+ }
+
+ if (trace_event) {
+ trace_event->Initialize(thread_id,
+ offset_event_timestamp,
+ thread_now,
+ phase,
+ category_group_enabled,
+ name,
+ id,
+ context_id,
+ bind_id,
+ num_args,
+ arg_names,
+ arg_types,
+ arg_values,
+ convertable_values,
+ flags);
+
+#if defined(OS_ANDROID)
+ trace_event->SendToATrace();
+#endif
+ }
+
+ if (trace_options() & kInternalEchoToConsole) {
+ console_message = EventToConsoleMessage(
+ phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+ timestamp, trace_event);
+ }
+ }
+
+ if (console_message.size())
+ LOG(ERROR) << console_message;
+
+ if (reinterpret_cast<const unsigned char*>(
+ subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
+ bool event_name_matches;
+ WatchEventCallback watch_event_callback_copy;
+ {
+ AutoLock lock(lock_);
+ event_name_matches = watch_event_name_ == name;
+ watch_event_callback_copy = watch_event_callback_;
+ }
+ if (event_name_matches) {
+ if (!watch_event_callback_copy.is_null())
+ watch_event_callback_copy.Run();
+ }
+ }
+
+ if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(
+ offset_event_timestamp,
+ phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+ category_group_enabled, name, id, num_args, arg_names, arg_types,
+ arg_values, flags);
+ }
+ }
+
+ if (base::trace_event::AllocationContextTracker::capture_enabled()) {
+ if (phase == TRACE_EVENT_PHASE_BEGIN || phase == TRACE_EVENT_PHASE_COMPLETE)
+ base::trace_event::AllocationContextTracker::PushPseudoStackFrame(name);
+ else if (phase == TRACE_EVENT_PHASE_END)
+ // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
+ // is in |TraceLog::UpdateTraceEventDuration|.
+ base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
+ }
+
+ return handle;
+}
+
+// May be called when a COMPELETE event ends and the unfinished event has been
+// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
+std::string TraceLog::EventToConsoleMessage(unsigned char phase,
+ const TraceTicks& timestamp,
+ TraceEvent* trace_event) {
+ AutoLock thread_info_lock(thread_info_lock_);
+
+ // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
+ // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_END.
+ DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
+
+ TimeDelta duration;
+ int thread_id =
+ trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
+ if (phase == TRACE_EVENT_PHASE_END) {
+ duration = timestamp - thread_event_start_times_[thread_id].top();
+ thread_event_start_times_[thread_id].pop();
+ }
+
+ std::string thread_name = thread_names_[thread_id];
+ if (thread_colors_.find(thread_name) == thread_colors_.end())
+ thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
+
+ std::ostringstream log;
+ log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
+ thread_colors_[thread_name]);
+
+ size_t depth = 0;
+ if (thread_event_start_times_.find(thread_id) !=
+ thread_event_start_times_.end())
+ depth = thread_event_start_times_[thread_id].size();
+
+ for (size_t i = 0; i < depth; ++i)
+ log << "| ";
+
+ if (trace_event)
+ trace_event->AppendPrettyPrinted(&log);
+ if (phase == TRACE_EVENT_PHASE_END)
+ log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
+
+ log << "\x1b[0;m";
+
+ if (phase == TRACE_EVENT_PHASE_BEGIN)
+ thread_event_start_times_[thread_id].push(timestamp);
+
+ return log.str();
+}
+
+void TraceLog::AddTraceEventEtw(char phase,
+ const char* name,
+ const void* id,
+ const char* extra) {
+#if defined(OS_WIN)
+ TraceEventETWProvider::Trace(name, phase, id, extra);
+#endif
+ INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
+ TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
+}
+
+void TraceLog::AddTraceEventEtw(char phase,
+ const char* name,
+ const void* id,
+ const std::string& extra) {
+#if defined(OS_WIN)
+ TraceEventETWProvider::Trace(name, phase, id, extra);
+#endif
+ INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
+ TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
+}
+
+void TraceLog::UpdateTraceEventDuration(
+ const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle) {
+ // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+ // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+ // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+ if (thread_is_in_trace_event_.Get())
+ return;
+
+ AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+ ThreadTicks thread_now = ThreadNow();
+ TraceTicks now = OffsetNow();
+
+ std::string console_message;
+ if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+ OptionalAutoLock lock(&lock_);
+
+ TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
+ if (trace_event) {
+ DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+ trace_event->UpdateDuration(now, thread_now);
+#if defined(OS_ANDROID)
+ trace_event->SendToATrace();
+#endif
+ }
+
+ if (trace_options() & kInternalEchoToConsole) {
+ console_message =
+ EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
+ }
+
+ if (base::trace_event::AllocationContextTracker::capture_enabled()) {
+ // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
+ base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
+ }
+ }
+
+ if (console_message.size())
+ LOG(ERROR) << console_message;
+
+ if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+ trace_event_internal::kNoId, 0,
+ nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+ }
+ }
+}
+
+void TraceLog::SetWatchEvent(const std::string& category_name,
+ const std::string& event_name,
+ const WatchEventCallback& callback) {
+ const unsigned char* category =
+ GetCategoryGroupEnabled(category_name.c_str());
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&watch_category_,
+ reinterpret_cast<subtle::AtomicWord>(category));
+ watch_event_name_ = event_name;
+ watch_event_callback_ = callback;
+}
+
+void TraceLog::CancelWatchEvent() {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&watch_category_, 0);
+ watch_event_name_ = "";
+ watch_event_callback_.Reset();
+}
+
+uint64 TraceLog::MangleEventId(uint64 id) {
+ return id ^ process_id_hash_;
+}
+
+void TraceLog::AddMetadataEventsWhileLocked() {
+ lock_.AssertAcquired();
+
+#if !defined(OS_NACL) // NaCl shouldn't expose the process id.
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ 0, "num_cpus", "number",
+ base::SysInfo::NumberOfProcessors());
+#endif
+
+ int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ if (process_sort_index_ != 0) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_sort_index",
+ "sort_index", process_sort_index_);
+ }
+
+ if (process_name_.size()) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_name", "name",
+ process_name_);
+ }
+
+ if (process_labels_.size() > 0) {
+ std::vector<std::string> labels;
+ for (base::hash_map<int, std::string>::iterator it =
+ process_labels_.begin();
+ it != process_labels_.end(); it++) {
+ labels.push_back(it->second);
+ }
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_labels", "labels",
+ base::JoinString(labels, ","));
+ }
+
+ // Thread sort indices.
+ for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
+ it != thread_sort_indices_.end(); it++) {
+ if (it->second == 0)
+ continue;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ it->first, "thread_sort_index", "sort_index",
+ it->second);
+ }
+
+ // Thread names.
+ AutoLock thread_info_lock(thread_info_lock_);
+ for (hash_map<int, std::string>::iterator it = thread_names_.begin();
+ it != thread_names_.end(); it++) {
+ if (it->second.empty())
+ continue;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ it->first, "thread_name", "name", it->second);
+ }
+
+ // If buffer is full, add a metadata record to report this.
+ if (!buffer_limit_reached_timestamp_.is_null()) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "trace_buffer_overflowed",
+ "overflowed_at_ts",
+ buffer_limit_reached_timestamp_);
+ }
+}
+
+void TraceLog::WaitSamplingEventForTesting() {
+ if (!sampling_thread_)
+ return;
+ sampling_thread_->WaitSamplingEventForTesting();
+}
+
+void TraceLog::DeleteForTesting() {
+ internal::DeleteTraceLogForTesting::Delete();
+}
+
+TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
+ return GetEventByHandleInternal(handle, NULL);
+}
+
+TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
+ OptionalAutoLock* lock) {
+ if (!handle.chunk_seq)
+ return NULL;
+
+ if (thread_local_event_buffer_.Get()) {
+ TraceEvent* trace_event =
+ thread_local_event_buffer_.Get()->GetEventByHandle(handle);
+ if (trace_event)
+ return trace_event;
+ }
+
+ // The event has been out-of-control of the thread local buffer.
+ // Try to get the event from the main buffer with a lock.
+ if (lock)
+ lock->EnsureAcquired();
+
+ if (thread_shared_chunk_ &&
+ handle.chunk_index == thread_shared_chunk_index_) {
+ return handle.chunk_seq == thread_shared_chunk_->seq()
+ ? thread_shared_chunk_->GetEventAt(handle.event_index)
+ : NULL;
+ }
+
+ return logged_events_->GetEventByHandle(handle);
+}
+
+void TraceLog::SetProcessID(int process_id) {
+ process_id_ = process_id;
+ // Create a FNV hash from the process ID for XORing.
+ // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
+ unsigned long long offset_basis = 14695981039346656037ull;
+ unsigned long long fnv_prime = 1099511628211ull;
+ unsigned long long pid = static_cast<unsigned long long>(process_id_);
+ process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
+}
+
+void TraceLog::SetProcessSortIndex(int sort_index) {
+ AutoLock lock(lock_);
+ process_sort_index_ = sort_index;
+}
+
+void TraceLog::SetProcessName(const std::string& process_name) {
+ AutoLock lock(lock_);
+ process_name_ = process_name;
+}
+
+void TraceLog::UpdateProcessLabel(int label_id,
+ const std::string& current_label) {
+ if (!current_label.length())
+ return RemoveProcessLabel(label_id);
+
+ AutoLock lock(lock_);
+ process_labels_[label_id] = current_label;
+}
+
+void TraceLog::RemoveProcessLabel(int label_id) {
+ AutoLock lock(lock_);
+ base::hash_map<int, std::string>::iterator it =
+ process_labels_.find(label_id);
+ if (it == process_labels_.end())
+ return;
+
+ process_labels_.erase(it);
+}
+
+void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
+ AutoLock lock(lock_);
+ thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
+}
+
+void TraceLog::SetTimeOffset(TimeDelta offset) {
+ time_offset_ = offset;
+}
+
+size_t TraceLog::GetObserverCountForTest() const {
+ return enabled_state_observer_list_.size();
+}
+
+void TraceLog::SetCurrentThreadBlocksMessageLoop() {
+ thread_blocks_message_loop_.Set(true);
+ if (thread_local_event_buffer_.Get()) {
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
+ }
+}
+
+TraceBuffer* TraceLog::CreateTraceBuffer() {
+ InternalTraceOptions options = trace_options();
+ if (options & kInternalRecordContinuously)
+ return TraceBuffer::CreateTraceBufferRingBuffer(
+ kTraceEventRingBufferChunks);
+ else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
+ return TraceBuffer::CreateTraceBufferRingBuffer(
+ kMonitorTraceEventBufferChunks);
+ else if (options & kInternalEchoToConsole)
+ return TraceBuffer::CreateTraceBufferRingBuffer(
+ kEchoToConsoleTraceEventBufferChunks);
+ else if (options & kInternalRecordAsMuchAsPossible)
+ return TraceBuffer::CreateTraceBufferVectorOfSize(
+ kTraceEventVectorBigBufferChunks);
+ return TraceBuffer::CreateTraceBufferVectorOfSize(
+ kTraceEventVectorBufferChunks);
+}
+
+#if defined(OS_WIN)
+void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
+ AutoLock lock(lock_);
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ // Go through each category and set/clear the ETW bit depending on whether the
+ // category is enabled.
+ for (size_t i = 0; i < category_index; i++) {
+ const char* category_group = g_category_groups[i];
+ DCHECK(category_group);
+ if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+ category_group)) {
+ g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
+ } else {
+ g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
+ }
+ }
+}
+#endif // defined(OS_WIN)
+
+void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
+}
+
+} // namespace trace_event
+} // namespace base
+
+namespace trace_event_internal {
+
+ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
+ const char* category_group,
+ const char* name) {
+ // The single atom works because for now the category_group can only be "gpu".
+ DCHECK_EQ(strcmp(category_group, "gpu"), 0);
+ static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
+ category_group, atomic, category_group_enabled_);
+ name_ = name;
+ if (*category_group_enabled_) {
+ event_handle_ =
+ TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ TRACE_EVENT_PHASE_COMPLETE,
+ category_group_enabled_,
+ name,
+ trace_event_internal::kNoId, // id
+ trace_event_internal::kNoId, // context_id
+ static_cast<int>(base::PlatformThread::CurrentId()), // thread_id
+ base::TraceTicks::Now(),
+ trace_event_internal::kZeroNumArgs,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ TRACE_EVENT_FLAG_NONE);
+ }
+}
+
+ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
+ if (*category_group_enabled_) {
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
+ event_handle_);
+ }
+}
+
+} // namespace trace_event_internal
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
new file mode 100644
index 00000000000..4fb33c177a9
--- /dev/null
+++ b/chromium/base/trace_event/trace_log.h
@@ -0,0 +1,494 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_LOG_H_
+#define BASE_TRACE_EVENT_TRACE_LOG_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_impl.h"
+
+// Older style trace macros with explicit id and extra data
+// Only these macros result in publishing data to ETW as currently implemented.
+// TODO(georgesak): Update/replace these with new ETW macros.
+#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
+ base::trace_event::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_BEGIN, name, reinterpret_cast<const void*>(id), extra)
+
+#define TRACE_EVENT_END_ETW(name, id, extra) \
+ base::trace_event::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_END, name, reinterpret_cast<const void*>(id), extra)
+
+#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
+ base::trace_event::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_INSTANT, name, reinterpret_cast<const void*>(id), \
+ extra)
+
+namespace base {
+
+template <typename Type>
+struct DefaultSingletonTraits;
+class RefCountedString;
+
+namespace trace_event {
+
+class TraceBuffer;
+class TraceBufferChunk;
+class TraceEvent;
+class TraceEventMemoryOverhead;
+class TraceSamplingThread;
+
+struct BASE_EXPORT TraceLogStatus {
+ TraceLogStatus();
+ ~TraceLogStatus();
+ size_t event_capacity;
+ size_t event_count;
+};
+
+class BASE_EXPORT TraceLog : public MemoryDumpProvider {
+ public:
+ enum Mode {
+ DISABLED = 0,
+ RECORDING_MODE,
+ MONITORING_MODE,
+ };
+
+ // The pointer returned from GetCategoryGroupEnabledInternal() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in TraceEvent.h in Blink.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ ENABLED_FOR_RECORDING = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ ENABLED_FOR_MONITORING = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+ // Category group enabled to export events to ETW.
+ ENABLED_FOR_ETW_EXPORT = 1 << 3
+ };
+
+ static TraceLog* GetInstance();
+
+ // Get set of known category groups. This can change as new code paths are
+ // reached. The known category groups are inserted into |category_groups|.
+ void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
+
+ // Retrieves a copy (for thread-safety) of the current TraceConfig.
+ TraceConfig GetCurrentTraceConfig() const;
+
+ // Initializes the thread-local event buffer, if not already initialized and
+ // if the current thread supports that (has a message loop).
+ void InitializeThreadLocalEventBufferIfSupported();
+
+ // Enables normal tracing (recording trace events in the trace buffer).
+ // See TraceConfig comments for details on how to control what categories
+ // will be traced. If tracing has already been enabled, |category_filter| will
+ // be merged into the current category filter.
+ void SetEnabled(const TraceConfig& trace_config, Mode mode);
+
+ // Disables normal tracing for all categories.
+ void SetDisabled();
+
+ bool IsEnabled() { return mode_ != DISABLED; }
+
+ // The number of times we have begun recording traces. If tracing is off,
+ // returns -1. If tracing is on, then it returns the number of times we have
+ // recorded a trace. By watching for this number to increment, you can
+ // passively discover when a new trace has begun. This is then used to
+ // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
+ int GetNumTracesRecorded();
+
+#if defined(OS_ANDROID)
+ void StartATrace();
+ void StopATrace();
+ void AddClockSyncMetadataEvent();
+#endif
+
+ // Enabled state listeners give a callback when tracing is enabled or
+ // disabled. This can be used to tie into other library's tracing systems
+ // on-demand.
+ class BASE_EXPORT EnabledStateObserver {
+ public:
+ virtual ~EnabledStateObserver() = default;
+
+ // Called just after the tracing system becomes enabled, outside of the
+ // |lock_|. TraceLog::IsEnabled() is true at this point.
+ virtual void OnTraceLogEnabled() = 0;
+
+ // Called just after the tracing system disables, outside of the |lock_|.
+ // TraceLog::IsEnabled() is false at this point.
+ virtual void OnTraceLogDisabled() = 0;
+ };
+ void AddEnabledStateObserver(EnabledStateObserver* listener);
+ void RemoveEnabledStateObserver(EnabledStateObserver* listener);
+ bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
+
+ TraceLogStatus GetStatus() const;
+ bool BufferIsFull() const;
+
+ // Computes an estimate of the size of the TraceLog including all the retained
+ // objects.
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+ // Not using base::Callback because of its limited by 7 parameters.
+ // Also, using primitive type allows directly passing callback from WebCore.
+ // WARNING: It is possible for the previously set callback to be called
+ // after a call to SetEventCallbackEnabled() that replaces or a call to
+ // SetEventCallbackDisabled() that disables the callback.
+ // This callback may be invoked on any thread.
+ // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
+ // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
+ // interface simple.
+ typedef void (*EventCallback)(TraceTicks timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char* const arg_names[],
+ const unsigned char arg_types[],
+ const unsigned long long arg_values[],
+ unsigned int flags);
+
+ // Enable tracing for EventCallback.
+ void SetEventCallbackEnabled(const TraceConfig& trace_config,
+ EventCallback cb);
+ void SetEventCallbackDisabled();
+ void SetArgumentFilterPredicate(
+ const ArgumentFilterPredicate& argument_filter_predicate);
+
+ // Flush all collected events to the given output callback. The callback will
+ // be called one or more times either synchronously or asynchronously from
+ // the current thread with IPC-bite-size chunks. The string format is
+ // undefined. Use TraceResultBuffer to convert one or more trace strings to
+ // JSON. The callback can be null if the caller doesn't want any data.
+ // Due to the implementation of thread-local buffers, flush can't be
+ // done when tracing is enabled. If called when tracing is enabled, the
+ // callback will be called directly with (empty_string, false) to indicate
+ // the end of this unsuccessful flush. Flush does the serialization
+ // on the same thread if the caller doesn't set use_worker_thread explicitly.
+ typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
+ bool has_more_events)> OutputCallback;
+ void Flush(const OutputCallback& cb, bool use_worker_thread = false);
+ void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
+
+ // Cancels tracing and discards collected data.
+ void CancelTracing(const OutputCallback& cb);
+
+ // Called by TRACE_EVENT* macros, don't call this directly.
+ // The name parameter is a category group for example:
+ // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
+ static const unsigned char* GetCategoryGroupEnabled(const char* name);
+ static const char* GetCategoryGroupName(
+ const unsigned char* category_group_enabled);
+
+ // Called by TRACE_EVENT* macros, don't call this directly.
+ // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
+ // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
+ TraceEventHandle AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ TraceEventHandle AddTraceEventWithContextId(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int thread_id,
+ const TraceTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ unsigned long long bind_id,
+ int thread_id,
+ const TraceTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ static void AddTraceEventEtw(char phase,
+ const char* category_group,
+ const void* id,
+ const char* extra);
+ static void AddTraceEventEtw(char phase,
+ const char* category_group,
+ const void* id,
+ const std::string& extra);
+
+ void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle);
+
+ // For every matching event, the callback will be called.
+ typedef base::Callback<void()> WatchEventCallback;
+ void SetWatchEvent(const std::string& category_name,
+ const std::string& event_name,
+ const WatchEventCallback& callback);
+ // Cancel the watch event. If tracing is enabled, this may race with the
+ // watch event notification firing.
+ void CancelWatchEvent();
+
+ int process_id() const { return process_id_; }
+
+ uint64 MangleEventId(uint64 id);
+
+ // Exposed for unittesting:
+
+ void WaitSamplingEventForTesting();
+
+ // Allows deleting our singleton instance.
+ static void DeleteForTesting();
+
+ // Allow tests to inspect TraceEvents.
+ TraceEvent* GetEventByHandle(TraceEventHandle handle);
+
+ void SetProcessID(int process_id);
+
+ // Process sort indices, if set, override the order of a process will appear
+ // relative to other processes in the trace viewer. Processes are sorted first
+ // on their sort index, ascending, then by their name, and then tid.
+ void SetProcessSortIndex(int sort_index);
+
+ // Sets the name of the process.
+ void SetProcessName(const std::string& process_name);
+
+ // Processes can have labels in addition to their names. Use labels, for
+ // instance, to list out the web page titles that a process is handling.
+ void UpdateProcessLabel(int label_id, const std::string& current_label);
+ void RemoveProcessLabel(int label_id);
+
+ // Thread sort indices, if set, override the order of a thread will appear
+ // within its process in the trace viewer. Threads are sorted first on their
+ // sort index, ascending, then by their name, and then tid.
+ void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);
+
+ // Allow setting an offset between the current TraceTicks time and the time
+ // that should be reported.
+ void SetTimeOffset(TimeDelta offset);
+
+ size_t GetObserverCountForTest() const;
+
+ // Call this method if the current thread may block the message loop to
+ // prevent the thread from using the thread-local buffer because the thread
+ // may not handle the flush request in time causing lost of unflushed events.
+ void SetCurrentThreadBlocksMessageLoop();
+
+#if defined(OS_WIN)
+ // This function is called by the ETW exporting module whenever the ETW
+ // keyword (flags) changes. This keyword indicates which categories should be
+ // exported, so whenever it changes, we adjust accordingly.
+ void UpdateETWCategoryGroupEnabledFlags();
+#endif
+
+ private:
+ typedef unsigned int InternalTraceOptions;
+
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferGetReturnChunk);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferHalfIteration);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferFullIteration);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, TraceBufferVectorReportFull);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ ConvertTraceConfigToInternalOptions);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceRecordAsMuchAsPossibleMode);
+
+ // This allows constructor and destructor to be private and usable only
+ // by the Singleton class.
+ friend struct DefaultSingletonTraits<TraceLog>;
+
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
+
+ // Enable/disable each category group based on the current mode_,
+ // category_filter_, event_callback_ and event_callback_category_filter_.
+ // Enable the category group in the enabled mode if category_filter_ matches
+ // the category group, or event_callback_ is not null and
+ // event_callback_category_filter_ matches the category group.
+ void UpdateCategoryGroupEnabledFlags();
+ void UpdateCategoryGroupEnabledFlag(size_t category_index);
+
+ // Configure synthetic delays based on the values set in the current
+ // trace config.
+ void UpdateSyntheticDelaysFromTraceConfig();
+
+ InternalTraceOptions GetInternalOptionsFromTraceConfig(
+ const TraceConfig& config);
+
+ class ThreadLocalEventBuffer;
+ class OptionalAutoLock;
+
+ TraceLog();
+ ~TraceLog() override;
+ const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
+ void AddMetadataEventsWhileLocked();
+
+ InternalTraceOptions trace_options() const {
+ return static_cast<InternalTraceOptions>(
+ subtle::NoBarrier_Load(&trace_options_));
+ }
+
+ TraceBuffer* trace_buffer() const { return logged_events_.get(); }
+ TraceBuffer* CreateTraceBuffer();
+
+ std::string EventToConsoleMessage(unsigned char phase,
+ const TraceTicks& timestamp,
+ TraceEvent* trace_event);
+
+ TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
+ bool check_buffer_is_full);
+ void CheckIfBufferIsFullWhileLocked();
+ void SetDisabledWhileLocked();
+
+ TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
+ OptionalAutoLock* lock);
+
+ void FlushInternal(const OutputCallback& cb,
+ bool use_worker_thread,
+ bool discard_events);
+
+ // |generation| is used in the following callbacks to check if the callback
+ // is called for the flush of the current |logged_events_|.
+ void FlushCurrentThread(int generation, bool discard_events);
+ // Usually it runs on a different thread.
+ static void ConvertTraceEventsToTraceFormat(
+ scoped_ptr<TraceBuffer> logged_events,
+ const TraceLog::OutputCallback& flush_output_callback,
+ const ArgumentFilterPredicate& argument_filter_predicate);
+ void FinishFlush(int generation, bool discard_events);
+ void OnFlushTimeout(int generation, bool discard_events);
+
+ int generation() const {
+ return static_cast<int>(subtle::NoBarrier_Load(&generation_));
+ }
+ bool CheckGeneration(int generation) const {
+ return generation == this->generation();
+ }
+ void UseNextTraceBuffer();
+
+ TraceTicks OffsetNow() const { return OffsetTimestamp(TraceTicks::Now()); }
+ TraceTicks OffsetTimestamp(const TraceTicks& timestamp) const {
+ return timestamp - time_offset_;
+ }
+
+ // Internal representation of trace options since we store the currently used
+ // trace option as an AtomicWord.
+ static const InternalTraceOptions kInternalNone;
+ static const InternalTraceOptions kInternalRecordUntilFull;
+ static const InternalTraceOptions kInternalRecordContinuously;
+ static const InternalTraceOptions kInternalEchoToConsole;
+ static const InternalTraceOptions kInternalEnableSampling;
+ static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
+ static const InternalTraceOptions kInternalEnableArgumentFilter;
+
+ // This lock protects TraceLog member accesses (except for members protected
+ // by thread_info_lock_) from arbitrary threads.
+ mutable Lock lock_;
+ // This lock protects accesses to thread_names_, thread_event_start_times_
+ // and thread_colors_.
+ Lock thread_info_lock_;
+ Mode mode_;
+ int num_traces_recorded_;
+ scoped_ptr<TraceBuffer> logged_events_;
+ subtle::AtomicWord /* EventCallback */ event_callback_;
+ bool dispatching_to_observer_list_;
+ std::vector<EnabledStateObserver*> enabled_state_observer_list_;
+
+ std::string process_name_;
+ base::hash_map<int, std::string> process_labels_;
+ int process_sort_index_;
+ base::hash_map<int, int> thread_sort_indices_;
+ base::hash_map<int, std::string> thread_names_;
+
+ // The following two maps are used only when ECHO_TO_CONSOLE.
+ base::hash_map<int, std::stack<TraceTicks>> thread_event_start_times_;
+ base::hash_map<std::string, int> thread_colors_;
+
+ TraceTicks buffer_limit_reached_timestamp_;
+
+ // XORed with TraceID to make it unlikely to collide with other processes.
+ unsigned long long process_id_hash_;
+
+ int process_id_;
+
+ TimeDelta time_offset_;
+
+ // Allow tests to wake up when certain events occur.
+ WatchEventCallback watch_event_callback_;
+ subtle::AtomicWord /* const unsigned char* */ watch_category_;
+ std::string watch_event_name_;
+
+ subtle::AtomicWord /* Options */ trace_options_;
+
+ // Sampling thread handles.
+ scoped_ptr<TraceSamplingThread> sampling_thread_;
+ PlatformThreadHandle sampling_thread_handle_;
+
+ TraceConfig trace_config_;
+ TraceConfig event_callback_trace_config_;
+
+ ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
+ ThreadLocalBoolean thread_blocks_message_loop_;
+ ThreadLocalBoolean thread_is_in_trace_event_;
+
+ // Contains the message loops of threads that have had at least one event
+ // added into the local event buffer. Not using SingleThreadTaskRunner
+ // because we need to know the life time of the message loops.
+ hash_set<MessageLoop*> thread_message_loops_;
+
+ // For events which can't be added into the thread local buffer, e.g. events
+ // from threads without a message loop.
+ scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
+ size_t thread_shared_chunk_index_;
+
+ // Set when asynchronous Flush is in progress.
+ OutputCallback flush_output_callback_;
+ scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
+ ArgumentFilterPredicate argument_filter_predicate_;
+ subtle::AtomicWord generation_;
+ bool use_worker_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceLog);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_LOG_H_
diff --git a/chromium/base/trace_event/trace_event_impl_constants.cc b/chromium/base/trace_event/trace_log_constants.cc
index b7f3b4c4a2e..cd2ff0dad3f 100644
--- a/chromium/base/trace_event/trace_event_impl_constants.cc
+++ b/chromium/base/trace_event/trace_log_constants.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
namespace base {
namespace trace_event {
diff --git a/chromium/base/trace_event/trace_sampling_thread.cc b/chromium/base/trace_event/trace_sampling_thread.cc
new file mode 100644
index 00000000000..32ce7bde769
--- /dev/null
+++ b/chromium/base/trace_event/trace_sampling_thread.cc
@@ -0,0 +1,101 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_sampling_thread.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceBucketData {
+ public:
+ TraceBucketData(base::subtle::AtomicWord* bucket,
+ const char* name,
+ TraceSampleCallback callback);
+ ~TraceBucketData();
+
+ TRACE_EVENT_API_ATOMIC_WORD* bucket;
+ const char* bucket_name;
+ TraceSampleCallback callback;
+};
+
+TraceSamplingThread::TraceSamplingThread()
+ : thread_running_(false), waitable_event_for_testing_(false, false) {}
+
+TraceSamplingThread::~TraceSamplingThread() {}
+
+void TraceSamplingThread::ThreadMain() {
+ PlatformThread::SetName("Sampling Thread");
+ thread_running_ = true;
+ const int kSamplingFrequencyMicroseconds = 1000;
+ while (!cancellation_flag_.IsSet()) {
+ PlatformThread::Sleep(
+ TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
+ GetSamples();
+ waitable_event_for_testing_.Signal();
+ }
+}
+
+// static
+void TraceSamplingThread::DefaultSamplingCallback(
+ TraceBucketData* bucket_data) {
+ TRACE_EVENT_API_ATOMIC_WORD category_and_name =
+ TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
+ if (!category_and_name)
+ return;
+ const char* const combined =
+ reinterpret_cast<const char* const>(category_and_name);
+ const char* category_group;
+ const char* name;
+ ExtractCategoryAndName(combined, &category_group, &name);
+ TRACE_EVENT_API_ADD_TRACE_EVENT(
+ TRACE_EVENT_PHASE_SAMPLE,
+ TraceLog::GetCategoryGroupEnabled(category_group), name, 0, 0, NULL, NULL,
+ NULL, NULL, 0);
+}
+
+void TraceSamplingThread::GetSamples() {
+ for (size_t i = 0; i < sample_buckets_.size(); ++i) {
+ TraceBucketData* bucket_data = &sample_buckets_[i];
+ bucket_data->callback.Run(bucket_data);
+ }
+}
+
+void TraceSamplingThread::RegisterSampleBucket(
+ TRACE_EVENT_API_ATOMIC_WORD* bucket,
+ const char* const name,
+ TraceSampleCallback callback) {
+ // Access to sample_buckets_ doesn't cause races with the sampling thread
+ // that uses the sample_buckets_, because it is guaranteed that
+ // RegisterSampleBucket is called before the sampling thread is created.
+ DCHECK(!thread_running_);
+ sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
+}
+
+// static
+void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
+ const char** category,
+ const char** name) {
+ *category = combined;
+ *name = &combined[strlen(combined) + 1];
+}
+
+void TraceSamplingThread::Stop() {
+ cancellation_flag_.Set();
+}
+
+void TraceSamplingThread::WaitSamplingEventForTesting() {
+ waitable_event_for_testing_.Wait();
+}
+
+TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
+ const char* name,
+ TraceSampleCallback callback)
+ : bucket(bucket), bucket_name(name), callback(callback) {}
+
+TraceBucketData::~TraceBucketData() {}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_sampling_thread.h b/chromium/base/trace_event/trace_sampling_thread.h
new file mode 100644
index 00000000000..f976a80e07c
--- /dev/null
+++ b/chromium/base/trace_event/trace_sampling_thread.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceBucketData;
+typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
+
+// This object must be created on the IO thread.
+class TraceSamplingThread : public PlatformThread::Delegate {
+ public:
+ TraceSamplingThread();
+ ~TraceSamplingThread() override;
+
+ // Implementation of PlatformThread::Delegate:
+ void ThreadMain() override;
+
+ static void DefaultSamplingCallback(TraceBucketData* bucket_data);
+
+ void Stop();
+ void WaitSamplingEventForTesting();
+
+ private:
+ friend class TraceLog;
+
+ void GetSamples();
+ // Not thread-safe. Once the ThreadMain has been called, this can no longer
+ // be called.
+ void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
+ const char* const name,
+ TraceSampleCallback callback);
+ // Splits a combined "category\0name" into the two component parts.
+ static void ExtractCategoryAndName(const char* combined,
+ const char** category,
+ const char** name);
+ std::vector<TraceBucketData> sample_buckets_;
+ bool thread_running_;
+ CancellationFlag cancellation_flag_;
+ WaitableEvent waitable_event_for_testing_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
diff --git a/chromium/base/trace_event/winheap_dump_provider_win.cc b/chromium/base/trace_event/winheap_dump_provider_win.cc
index c8921f445a6..7d003c96979 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win.cc
+++ b/chromium/base/trace_event/winheap_dump_provider_win.cc
@@ -7,32 +7,35 @@
#include <windows.h>
#include "base/debug/profiler.h"
+#include "base/strings/string_util.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/win/windows_version.h"
namespace base {
namespace trace_event {
+#define DUMP_ROOT_NAME "winheap"
+// static
+const char WinHeapDumpProvider::kAllocatedObjects[] =
+ DUMP_ROOT_NAME "/allocated_objects";
+
namespace {
// Report a heap dump to a process memory dump. The |heap_info| structure
// contains the information about this heap, and |dump_absolute_name| will be
// used to represent it in the report.
-void ReportHeapDump(ProcessMemoryDump* pmd,
- const WinHeapInfo& heap_info,
- const std::string& dump_absolute_name) {
- MemoryAllocatorDump* outer_dump =
- pmd->CreateAllocatorDump(dump_absolute_name);
+void ReportHeapDump(ProcessMemoryDump* pmd, const WinHeapInfo& heap_info) {
+ MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump(DUMP_ROOT_NAME);
outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
heap_info.committed_size);
MemoryAllocatorDump* inner_dump =
- pmd->CreateAllocatorDump(dump_absolute_name + "/allocated_objects");
+ pmd->CreateAllocatorDump(WinHeapDumpProvider::kAllocatedObjects);
inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
heap_info.allocated_size);
- inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
+ inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects,
heap_info.block_count);
}
@@ -44,7 +47,8 @@ WinHeapDumpProvider* WinHeapDumpProvider::GetInstance() {
LeakySingletonTraits<WinHeapDumpProvider>>::get();
}
-bool WinHeapDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
+bool WinHeapDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) {
// This method might be flaky for 2 reasons:
// - GetProcessHeaps is racy by design. It returns a snapshot of the
// available heaps, but there's no guarantee that that snapshot remains
@@ -96,7 +100,7 @@ bool WinHeapDumpProvider::OnMemoryDump(ProcessMemoryDump* pmd) {
all_heap_info.block_count += heap_info.block_count;
}
// Report the heap dump.
- ReportHeapDump(pmd, all_heap_info, "winheap");
+ ReportHeapDump(pmd, all_heap_info);
return true;
}
diff --git a/chromium/base/trace_event/winheap_dump_provider_win.h b/chromium/base/trace_event/winheap_dump_provider_win.h
index 99239a066ef..e3653550d44 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win.h
+++ b/chromium/base/trace_event/winheap_dump_provider_win.h
@@ -26,10 +26,15 @@ struct WinHeapInfo {
// about them.
class BASE_EXPORT WinHeapDumpProvider : public MemoryDumpProvider {
public:
+ // Name of the allocated_objects dump. Use this to declare suballocator dumps
+ // from other dump providers.
+ static const char kAllocatedObjects[];
+
static WinHeapDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override;
private:
friend struct DefaultSingletonTraits<WinHeapDumpProvider>;
diff --git a/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc b/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
index 865a1d2f5ce..2a072aa57bf 100644
--- a/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
+++ b/chromium/base/trace_event/winheap_dump_provider_win_unittest.cc
@@ -15,12 +15,13 @@ namespace trace_event {
TEST(WinHeapDumpProviderTest, OnMemoryDump) {
ProcessMemoryDump pmd(make_scoped_refptr(new MemoryDumpSessionState()));
+ MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
WinHeapDumpProvider* winheap_dump_provider =
WinHeapDumpProvider::GetInstance();
ASSERT_NE(static_cast<WinHeapDumpProvider*>(nullptr), winheap_dump_provider);
- ASSERT_NO_FATAL_FAILURE(winheap_dump_provider->OnMemoryDump(&pmd));
+ ASSERT_NO_FATAL_FAILURE(winheap_dump_provider->OnMemoryDump(dump_args, &pmd));
}
} // namespace trace_event
diff --git a/chromium/base/tracked_objects.cc b/chromium/base/tracked_objects.cc
index 9db05c0d3fd..c7a6a3f3c95 100644
--- a/chromium/base/tracked_objects.cc
+++ b/chromium/base/tracked_objects.cc
@@ -310,7 +310,7 @@ base::LazyInstance<base::Lock>::Leaky
ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;
// static
-ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
+base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
ThreadData::ThreadData(const std::string& suggested_name)
: next_(NULL),
@@ -692,7 +692,7 @@ static void OptionallyInitializeAlternateTimer() {
}
void ThreadData::Initialize() {
- if (status_ >= DEACTIVATED)
+ if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
return; // Someone else did the initialization.
// Due to racy lazy initialization in tests, we'll need to recheck status_
// after we acquire the lock.
@@ -701,7 +701,7 @@ void ThreadData::Initialize() {
// threaded in the product, but some tests may be racy and lazy about our
// initialization.
base::AutoLock lock(*list_lock_.Pointer());
- if (status_ >= DEACTIVATED)
+ if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
return; // Someone raced in here and beat us.
// Put an alternate timer in place if the environment calls for it, such as
@@ -714,12 +714,12 @@ void ThreadData::Initialize() {
// Perform the "real" TLS initialization now, and leave it intact through
// process termination.
if (!tls_index_.initialized()) { // Testing may have initialized this.
- DCHECK_EQ(status_, UNINITIALIZED);
+ DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), UNINITIALIZED);
tls_index_.Initialize(&ThreadData::OnThreadTermination);
DCHECK(tls_index_.initialized());
} else {
// TLS was initialzed for us earlier.
- DCHECK_EQ(status_, DORMANT_DURING_TESTS);
+ DCHECK_EQ(base::subtle::NoBarrier_Load(&status_), DORMANT_DURING_TESTS);
}
// Incarnation counter is only significant to testing, as it otherwise will
@@ -729,8 +729,8 @@ void ThreadData::Initialize() {
// The lock is not critical for setting status_, but it doesn't hurt. It also
// ensures that if we have a racy initialization, that we'll bail as soon as
// we get the lock earlier in this method.
- status_ = kInitialStartupState;
- DCHECK(status_ != UNINITIALIZED);
+ base::subtle::Release_Store(&status_, kInitialStartupState);
+ DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
}
// static
@@ -742,17 +742,17 @@ void ThreadData::InitializeAndSetTrackingStatus(Status status) {
if (status > DEACTIVATED)
status = PROFILING_ACTIVE;
- status_ = status;
+ base::subtle::Release_Store(&status_, status);
}
// static
ThreadData::Status ThreadData::status() {
- return status_;
+ return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
}
// static
bool ThreadData::TrackingStatus() {
- return status_ > DEACTIVATED;
+ return base::subtle::Acquire_Load(&status_) > DEACTIVATED;
}
// static
@@ -817,7 +817,8 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
worker_thread_data_creation_count_ = 0;
cleanup_count_ = 0;
tls_index_.Set(NULL);
- status_ = DORMANT_DURING_TESTS; // Almost UNINITIALIZED.
+ // Almost UNINITIALIZED.
+ base::subtle::Release_Store(&status_, DORMANT_DURING_TESTS);
// To avoid any chance of racing in unit tests, which is the only place we
// call this function, we may sometimes leak all the data structures we
diff --git a/chromium/base/tracked_objects.h b/chromium/base/tracked_objects.h
index 8f8379409db..e62948d8d07 100644
--- a/chromium/base/tracked_objects.h
+++ b/chromium/base/tracked_objects.h
@@ -12,6 +12,7 @@
#include <utility>
#include <vector>
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/basictypes.h"
#include "base/containers/hash_tables.h"
@@ -199,7 +200,7 @@ class BASE_EXPORT BirthOnThread {
public:
BirthOnThread(const Location& location, const ThreadData& current);
- const Location location() const { return location_; }
+ const Location& location() const { return location_; }
const ThreadData* birth_thread() const { return birth_thread_; }
private:
@@ -661,7 +662,7 @@ class BASE_EXPORT ThreadData {
static base::LazyInstance<base::Lock>::Leaky list_lock_;
// We set status_ to SHUTDOWN when we shut down the tracking service.
- static Status status_;
+ static base::subtle::Atomic32 status_;
// Link to next instance (null terminated list). Used to globally track all
// registered instances (corresponds to all registered threads where we keep
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 55947a4af70..9b2483e7c18 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -351,6 +351,16 @@ bool BinaryValue::Equals(const Value* other) const {
///////////////////// DictionaryValue ////////////////////
+// static
+scoped_ptr<DictionaryValue> DictionaryValue::From(scoped_ptr<Value> value) {
+ DictionaryValue* out;
+ if (value && value->GetAsDictionary(&out)) {
+ ignore_result(value.release());
+ return make_scoped_ptr(out);
+ }
+ return nullptr;
+}
+
DictionaryValue::DictionaryValue()
: Value(TYPE_DICTIONARY) {
}
@@ -491,8 +501,9 @@ bool DictionaryValue::Get(StringPiece path,
delimiter_position != std::string::npos;
delimiter_position = current_path.find('.')) {
const DictionaryValue* child_dictionary = NULL;
- if (!current_dictionary->GetDictionary(
- current_path.substr(0, delimiter_position), &child_dictionary)) {
+ if (!current_dictionary->GetDictionaryWithoutPathExpansion(
+ current_path.substr(0, delimiter_position).as_string(),
+ &child_dictionary)) {
return false;
}
@@ -869,6 +880,16 @@ bool DictionaryValue::Equals(const Value* other) const {
///////////////////// ListValue ////////////////////
+// static
+scoped_ptr<ListValue> ListValue::From(scoped_ptr<Value> value) {
+ ListValue* out;
+ if (value && value->GetAsList(&out)) {
+ ignore_result(value.release());
+ return make_scoped_ptr(out);
+ }
+ return nullptr;
+}
+
ListValue::ListValue() : Value(TYPE_LIST) {
}
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 8756debdbdf..56be542d747 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -209,6 +209,9 @@ class BASE_EXPORT BinaryValue: public Value {
// are |std::string|s and should be UTF-8 encoded.
class BASE_EXPORT DictionaryValue : public Value {
public:
+ // Returns |value| if it is a dictionary, nullptr otherwise.
+ static scoped_ptr<DictionaryValue> From(scoped_ptr<Value> value);
+
DictionaryValue();
~DictionaryValue() override;
@@ -387,6 +390,9 @@ class BASE_EXPORT ListValue : public Value {
typedef ValueVector::iterator iterator;
typedef ValueVector::const_iterator const_iterator;
+ // Returns |value| if it is a list, nullptr otherwise.
+ static scoped_ptr<ListValue> From(scoped_ptr<Value> value);
+
ListValue();
~ListValue() override;
diff --git a/chromium/base/version.cc b/chromium/base/version.cc
index 228dcb8aef2..3677b731980 100644
--- a/chromium/base/version.cc
+++ b/chromium/base/version.cc
@@ -168,10 +168,10 @@ const std::string Version::GetString() const {
std::string version_str;
size_t count = components_.size();
for (size_t i = 0; i < count - 1; ++i) {
- version_str.append(IntToString(components_[i]));
+ version_str.append(UintToString(components_[i]));
version_str.append(".");
}
- version_str.append(IntToString(components_[count - 1]));
+ version_str.append(UintToString(components_[count - 1]));
return version_str;
}
diff --git a/chromium/base/win/OWNERS b/chromium/base/win/OWNERS
index 8624efe736c..9c18edfcb10 100644
--- a/chromium/base/win/OWNERS
+++ b/chromium/base/win/OWNERS
@@ -1,3 +1,2 @@
cpu@chromium.org
grt@chromium.org
-rvargas@chromium.org
diff --git a/chromium/base/win/event_trace_controller.cc b/chromium/base/win/event_trace_controller.cc
index 9a35a6bd57a..ff392a34db0 100644
--- a/chromium/base/win/event_trace_controller.cc
+++ b/chromium/base/win/event_trace_controller.cc
@@ -46,7 +46,8 @@ EtwTraceController::EtwTraceController() : session_(NULL) {
}
EtwTraceController::~EtwTraceController() {
- Stop(NULL);
+ if (session_)
+ Stop(NULL);
}
HRESULT EtwTraceController::Start(const wchar_t* session_name,
diff --git a/chromium/base/win/event_trace_controller_unittest.cc b/chromium/base/win/event_trace_controller_unittest.cc
index a2cd81cf709..317327573bb 100644
--- a/chromium/base/win/event_trace_controller_unittest.cc
+++ b/chromium/base/win/event_trace_controller_unittest.cc
@@ -17,6 +17,7 @@
#include "base/win/event_trace_controller.h"
#include "base/win/event_trace_provider.h"
#include "base/win/scoped_handle.h"
+#include "base/win/windows_version.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -221,13 +222,20 @@ TEST_F(EtwTraceControllerTest, EnableDisable) {
EXPECT_EQ(TRACE_LEVEL_VERBOSE, provider.enable_level());
EXPECT_EQ(kTestProviderFlags, provider.enable_flags());
+ // Consume the callback event of the previous controller.EnableProvider().
+ provider.WaitForCallback();
+
EXPECT_HRESULT_SUCCEEDED(controller.Stop(NULL));
- provider.WaitForCallback();
+ // Windows 7 does not call the callback when Stop() is called so we
+ // can't wait, and enable_level and enable_flags are not zeroed.
+ if (base::win::GetVersion() >= VERSION_WIN8) {
+ provider.WaitForCallback();
- // Session should have wound down.
- EXPECT_EQ(0, provider.enable_level());
- EXPECT_EQ(0, provider.enable_flags());
+ // Session should have wound down.
+ EXPECT_EQ(0, provider.enable_level());
+ EXPECT_EQ(0, provider.enable_flags());
+ }
}
} // namespace win
diff --git a/chromium/base/win/message_window.cc b/chromium/base/win/message_window.cc
index 58010e4f376..57fe64c7981 100644
--- a/chromium/base/win/message_window.cc
+++ b/chromium/base/win/message_window.cc
@@ -7,7 +7,6 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/process/memory.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/win/wrapped_window_proc.h"
const wchar_t kMessageWindowClassName[] = L"Chrome_MessageWindow";
diff --git a/chromium/base/win/object_watcher.cc b/chromium/base/win/object_watcher.cc
index 5ebe1856d31..93efd0622a0 100644
--- a/chromium/base/win/object_watcher.cc
+++ b/chromium/base/win/object_watcher.cc
@@ -16,6 +16,7 @@ ObjectWatcher::ObjectWatcher()
: object_(NULL),
wait_object_(NULL),
origin_loop_(NULL),
+ run_once_(true),
weak_factory_(this) {
}
@@ -23,36 +24,13 @@ ObjectWatcher::~ObjectWatcher() {
StopWatching();
}
-bool ObjectWatcher::StartWatching(HANDLE object, Delegate* delegate) {
- CHECK(delegate);
- if (wait_object_) {
- NOTREACHED() << "Already watching an object";
- return false;
- }
-
- // Since our job is to just notice when an object is signaled and report the
- // result back to this thread, we can just run on a Windows wait thread.
- DWORD wait_flags = WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE;
-
- // DoneWaiting can be synchronously called from RegisterWaitForSingleObject,
- // so set up all state now.
- callback_ = base::Bind(&ObjectWatcher::Signal, weak_factory_.GetWeakPtr(),
- delegate);
- object_ = object;
- origin_loop_ = MessageLoop::current();
-
- if (!RegisterWaitForSingleObject(&wait_object_, object, DoneWaiting,
- this, INFINITE, wait_flags)) {
- DPLOG(FATAL) << "RegisterWaitForSingleObject failed";
- object_ = NULL;
- wait_object_ = NULL;
- return false;
- }
+bool ObjectWatcher::StartWatchingOnce(HANDLE object, Delegate* delegate) {
+ return StartWatchingInternal(object, delegate, true);
+}
- // We need to know if the current message loop is going away so we can
- // prevent the wait thread from trying to access a dead message loop.
- MessageLoop::current()->AddDestructionObserver(this);
- return true;
+bool ObjectWatcher::StartWatchingMultipleTimes(HANDLE object,
+ Delegate* delegate) {
+ return StartWatchingInternal(object, delegate, false);
}
bool ObjectWatcher::StopWatching() {
@@ -93,7 +71,44 @@ void CALLBACK ObjectWatcher::DoneWaiting(void* param, BOOLEAN timed_out) {
// that is always a pointer to a valid ObjectWater.
ObjectWatcher* that = static_cast<ObjectWatcher*>(param);
that->origin_loop_->task_runner()->PostTask(FROM_HERE, that->callback_);
- that->callback_.Reset();
+ if (that->run_once_)
+ that->callback_.Reset();
+}
+
+bool ObjectWatcher::StartWatchingInternal(HANDLE object, Delegate* delegate,
+ bool execute_only_once) {
+ CHECK(delegate);
+ if (wait_object_) {
+ NOTREACHED() << "Already watching an object";
+ return false;
+ }
+ run_once_ = execute_only_once;
+
+ // Since our job is to just notice when an object is signaled and report the
+ // result back to this thread, we can just run on a Windows wait thread.
+ DWORD wait_flags = WT_EXECUTEINWAITTHREAD;
+ if (run_once_)
+ wait_flags |= WT_EXECUTEONLYONCE;
+
+ // DoneWaiting can be synchronously called from RegisterWaitForSingleObject,
+ // so set up all state now.
+ callback_ = base::Bind(&ObjectWatcher::Signal, weak_factory_.GetWeakPtr(),
+ delegate);
+ object_ = object;
+ origin_loop_ = MessageLoop::current();
+
+ if (!RegisterWaitForSingleObject(&wait_object_, object, DoneWaiting,
+ this, INFINITE, wait_flags)) {
+ DPLOG(FATAL) << "RegisterWaitForSingleObject failed";
+ object_ = NULL;
+ wait_object_ = NULL;
+ return false;
+ }
+
+ // We need to know if the current message loop is going away so we can
+ // prevent the wait thread from trying to access a dead message loop.
+ MessageLoop::current()->AddDestructionObserver(this);
+ return true;
}
void ObjectWatcher::Signal(Delegate* delegate) {
@@ -101,7 +116,8 @@ void ObjectWatcher::Signal(Delegate* delegate) {
// StartWatching(). As a result, we save any state we need and clear previous
// watcher state before signaling the delegate.
HANDLE object = object_;
- StopWatching();
+ if (run_once_)
+ StopWatching();
delegate->OnObjectSignaled(object);
}
diff --git a/chromium/base/win/object_watcher.h b/chromium/base/win/object_watcher.h
index d68d9350b38..f4d608553d7 100644
--- a/chromium/base/win/object_watcher.h
+++ b/chromium/base/win/object_watcher.h
@@ -26,16 +26,16 @@ namespace win {
//
// Typical usage:
//
-// class MyClass : public base::ObjectWatcher::Delegate {
+// class MyClass : public base::win::ObjectWatcher::Delegate {
// public:
// void DoStuffWhenSignaled(HANDLE object) {
-// watcher_.StartWatching(object, this);
+// watcher_.StartWatchingOnce(object, this);
// }
-// virtual void OnObjectSignaled(HANDLE object) {
+// void OnObjectSignaled(HANDLE object) override {
// // OK, time to do stuff!
// }
// private:
-// base::ObjectWatcher watcher_;
+// base::win::ObjectWatcher watcher_;
// };
//
// In the above example, MyClass wants to "do stuff" when object becomes
@@ -59,19 +59,23 @@ class BASE_EXPORT ObjectWatcher : public MessageLoop::DestructionObserver {
~ObjectWatcher() override;
// When the object is signaled, the given delegate is notified on the thread
- // where StartWatching is called. The ObjectWatcher is not responsible for
+ // where StartWatchingOnce is called. The ObjectWatcher is not responsible for
// deleting the delegate.
- //
// Returns true if the watch was started. Otherwise, false is returned.
- //
- bool StartWatching(HANDLE object, Delegate* delegate);
+ bool StartWatchingOnce(HANDLE object, Delegate* delegate);
+
+ // Notifies the delegate, on the thread where this method is called, each time
+ // the object is set. By definition, the handle must be an auto-reset object.
+ // The caller must ensure that it (or any Windows system code) doesn't reset
+ // the event or else the delegate won't be called.
+ // Returns true if the watch was started. Otherwise, false is returned.
+ bool StartWatchingMultipleTimes(HANDLE object, Delegate* delegate);
// Stops watching. Does nothing if the watch has already completed. If the
// watch is still active, then it is canceled, and the associated delegate is
// not notified.
//
// Returns true if the watch was canceled. Otherwise, false is returned.
- //
bool StopWatching();
// Returns true if currently watching an object.
@@ -84,6 +88,10 @@ class BASE_EXPORT ObjectWatcher : public MessageLoop::DestructionObserver {
// Called on a background thread when done waiting.
static void CALLBACK DoneWaiting(void* param, BOOLEAN timed_out);
+ // Helper used by StartWatchingOnce and StartWatchingMultipleTimes.
+ bool StartWatchingInternal(HANDLE object, Delegate* delegate,
+ bool execute_only_once);
+
void Signal(Delegate* delegate);
// MessageLoop::DestructionObserver implementation:
@@ -94,7 +102,7 @@ class BASE_EXPORT ObjectWatcher : public MessageLoop::DestructionObserver {
HANDLE object_; // The object being watched
HANDLE wait_object_; // Returned by RegisterWaitForSingleObject
MessageLoop* origin_loop_; // Used to get back to the origin thread
-
+ bool run_once_;
WeakPtrFactory<ObjectWatcher> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(ObjectWatcher);
diff --git a/chromium/base/win/object_watcher_unittest.cc b/chromium/base/win/object_watcher_unittest.cc
index b30ca41a4fe..511ec49d788 100644
--- a/chromium/base/win/object_watcher_unittest.cc
+++ b/chromium/base/win/object_watcher_unittest.cc
@@ -42,7 +42,7 @@ void RunTest_BasicSignal(MessageLoop::Type message_loop_type) {
HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);
QuitDelegate delegate;
- bool ok = watcher.StartWatching(event, &delegate);
+ bool ok = watcher.StartWatchingOnce(event, &delegate);
EXPECT_TRUE(ok);
EXPECT_TRUE(watcher.IsWatching());
EXPECT_EQ(event, watcher.GetWatchedObject());
@@ -64,7 +64,7 @@ void RunTest_BasicCancel(MessageLoop::Type message_loop_type) {
HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);
QuitDelegate delegate;
- bool ok = watcher.StartWatching(event, &delegate);
+ bool ok = watcher.StartWatchingOnce(event, &delegate);
EXPECT_TRUE(ok);
watcher.StopWatching();
@@ -83,7 +83,7 @@ void RunTest_CancelAfterSet(MessageLoop::Type message_loop_type) {
// A manual-reset event that is not yet signaled.
HANDLE event = CreateEvent(NULL, TRUE, FALSE, NULL);
- bool ok = watcher.StartWatching(event, &delegate);
+ bool ok = watcher.StartWatchingOnce(event, &delegate);
EXPECT_TRUE(ok);
SetEvent(event);
@@ -110,7 +110,7 @@ void RunTest_SignalBeforeWatch(MessageLoop::Type message_loop_type) {
HANDLE event = CreateEvent(NULL, TRUE, TRUE, NULL);
QuitDelegate delegate;
- bool ok = watcher.StartWatching(event, &delegate);
+ bool ok = watcher.StartWatchingOnce(event, &delegate);
EXPECT_TRUE(ok);
MessageLoop::current()->Run();
@@ -130,12 +130,53 @@ void RunTest_OutlivesMessageLoop(MessageLoop::Type message_loop_type) {
MessageLoop message_loop(message_loop_type);
QuitDelegate delegate;
- watcher.StartWatching(event, &delegate);
+ watcher.StartWatchingOnce(event, &delegate);
}
}
CloseHandle(event);
}
+class QuitAfterMultipleDelegate : public ObjectWatcher::Delegate {
+ public:
+ QuitAfterMultipleDelegate(HANDLE event, int iterations)
+ : event_(event), iterations_(iterations) {}
+ void OnObjectSignaled(HANDLE object) override {
+ if (--iterations_) {
+ SetEvent(event_);
+ } else {
+ MessageLoop::current()->QuitWhenIdle();
+ }
+ }
+
+ private:
+ HANDLE event_;
+ int iterations_;
+};
+
+void RunTest_ExecuteMultipleTimes(MessageLoop::Type message_loop_type) {
+ MessageLoop message_loop(message_loop_type);
+
+ ObjectWatcher watcher;
+ EXPECT_FALSE(watcher.IsWatching());
+
+ // An auto-reset event that is not yet signaled.
+ HANDLE event = CreateEvent(NULL, FALSE, FALSE, NULL);
+
+ QuitAfterMultipleDelegate delegate(event, 2);
+ bool ok = watcher.StartWatchingMultipleTimes(event, &delegate);
+ EXPECT_TRUE(ok);
+ EXPECT_TRUE(watcher.IsWatching());
+ EXPECT_EQ(event, watcher.GetWatchedObject());
+
+ SetEvent(event);
+
+ MessageLoop::current()->Run();
+
+ EXPECT_TRUE(watcher.IsWatching());
+ EXPECT_TRUE(watcher.StopWatching());
+ CloseHandle(event);
+}
+
} // namespace
//-----------------------------------------------------------------------------
@@ -170,5 +211,11 @@ TEST(ObjectWatcherTest, OutlivesMessageLoop) {
RunTest_OutlivesMessageLoop(MessageLoop::TYPE_UI);
}
+TEST(ObjectWatcherTest, ExecuteMultipleTimes) {
+ RunTest_ExecuteMultipleTimes(MessageLoop::TYPE_DEFAULT);
+ RunTest_ExecuteMultipleTimes(MessageLoop::TYPE_IO);
+ RunTest_ExecuteMultipleTimes(MessageLoop::TYPE_UI);
+}
+
} // namespace win
} // namespace base
diff --git a/chromium/base/win/registry.cc b/chromium/base/win/registry.cc
index 47afcbfb77b..28e0461c621 100644
--- a/chromium/base/win/registry.cc
+++ b/chromium/base/win/registry.cc
@@ -82,7 +82,7 @@ bool RegKey::Watcher::StartWatching(HKEY key, const ChangeCallback& callback) {
}
callback_ = callback;
- return object_watcher_.StartWatching(watch_event_.Get(), this);
+ return object_watcher_.StartWatchingOnce(watch_event_.Get(), this);
}
// RegKey ----------------------------------------------------------------------
diff --git a/chromium/base/win/scoped_comptr.h b/chromium/base/win/scoped_comptr.h
index 373c0c3ba50..ade12fe549a 100644
--- a/chromium/base/win/scoped_comptr.h
+++ b/chromium/base/win/scoped_comptr.h
@@ -52,24 +52,24 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// Note that this function equates to IUnknown::Release and should not
// be confused with e.g. scoped_ptr::release().
void Release() {
- if (ptr_ != NULL) {
- ptr_->Release();
- ptr_ = NULL;
+ if (this->ptr_ != NULL) {
+ this->ptr_->Release();
+ this->ptr_ = NULL;
}
}
// Sets the internal pointer to NULL and returns the held object without
// releasing the reference.
Interface* Detach() {
- Interface* p = ptr_;
- ptr_ = NULL;
+ Interface* p = this->ptr_;
+ this->ptr_ = NULL;
return p;
}
// Accepts an interface pointer that has already been addref-ed.
void Attach(Interface* p) {
- DCHECK(!ptr_);
- ptr_ = p;
+ DCHECK(!this->ptr_);
+ this->ptr_ = p;
}
// Retrieves the pointer address.
@@ -77,8 +77,8 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// The function DCHECKs on the current value being NULL.
// Usage: Foo(p.Receive());
Interface** Receive() {
- DCHECK(!ptr_) << "Object leak. Pointer must be NULL";
- return &ptr_;
+ DCHECK(!this->ptr_) << "Object leak. Pointer must be NULL";
+ return &this->ptr_;
}
// A convenience for whenever a void pointer is needed as an out argument.
@@ -89,18 +89,18 @@ class ScopedComPtr : public scoped_refptr<Interface> {
template <class Query>
HRESULT QueryInterface(Query** p) {
DCHECK(p != NULL);
- DCHECK(ptr_ != NULL);
+ DCHECK(this->ptr_ != NULL);
// IUnknown already has a template version of QueryInterface
// so the iid parameter is implicit here. The only thing this
// function adds are the DCHECKs.
- return ptr_->QueryInterface(p);
+ return this->ptr_->QueryInterface(p);
}
// QI for times when the IID is not associated with the type.
HRESULT QueryInterface(const IID& iid, void** obj) {
DCHECK(obj != NULL);
- DCHECK(ptr_ != NULL);
- return ptr_->QueryInterface(iid, obj);
+ DCHECK(this->ptr_ != NULL);
+ return this->ptr_->QueryInterface(iid, obj);
}
// Queries |other| for the interface this object wraps and returns the
@@ -113,18 +113,18 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// Convenience wrapper around CoCreateInstance
HRESULT CreateInstance(const CLSID& clsid, IUnknown* outer = NULL,
DWORD context = CLSCTX_ALL) {
- DCHECK(!ptr_);
+ DCHECK(!this->ptr_);
HRESULT hr = ::CoCreateInstance(clsid, outer, context, *interface_id,
- reinterpret_cast<void**>(&ptr_));
+ reinterpret_cast<void**>(&this->ptr_));
return hr;
}
// Checks if the identity of |other| and this object is the same.
bool IsSameObject(IUnknown* other) {
- if (!other && !ptr_)
+ if (!other && !this->ptr_)
return true;
- if (!other || !ptr_)
+ if (!other || !this->ptr_)
return false;
ScopedComPtr<IUnknown> my_identity;
@@ -147,8 +147,8 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// by statically casting the ScopedComPtr instance to the wrapped interface
// and then making the call... but generally that shouldn't be necessary.
BlockIUnknownMethods* operator->() const {
- DCHECK(ptr_ != NULL);
- return reinterpret_cast<BlockIUnknownMethods*>(ptr_);
+ DCHECK(this->ptr_ != NULL);
+ return reinterpret_cast<BlockIUnknownMethods*>(this->ptr_);
}
// Pull in operator=() from the parent class.
diff --git a/chromium/base/win/scoped_handle.h b/chromium/base/win/scoped_handle.h
index 97fd7a5c79a..d1eb1d688ae 100644
--- a/chromium/base/win/scoped_handle.h
+++ b/chromium/base/win/scoped_handle.h
@@ -27,9 +27,13 @@ namespace win {
// Generic wrapper for raw handles that takes care of closing handles
// automatically. The class interface follows the style of
-// the ScopedFILE class with one addition:
+// the ScopedFILE class with two additions:
// - IsValid() method can tolerate multiple invalid handle values such as NULL
// and INVALID_HANDLE_VALUE (-1) for Win32 handles.
+// - Set() (and the constructors and assignment operators that call it)
+// preserve the Windows LastError code. This ensures that GetLastError() can
+// be called after stashing a handle in a GenericScopedHandle object. Doing
+// this explicitly is necessary because of bug 528394 and VC++ 2015.
template <class Traits, class Verifier>
class GenericScopedHandle {
MOVE_ONLY_TYPE_FOR_CPP_03(GenericScopedHandle, RValue)
@@ -66,6 +70,8 @@ class GenericScopedHandle {
void Set(Handle handle) {
if (handle_ != handle) {
+ // Preserve old LastError to avoid bug 528394.
+ auto last_error = ::GetLastError();
Close();
if (Traits::IsHandleValid(handle)) {
@@ -73,6 +79,7 @@ class GenericScopedHandle {
Verifier::StartTracking(handle, this, BASE_WIN_GET_CALLER,
tracked_objects::GetProgramCounter());
}
+ ::SetLastError(last_error);
}
}
diff --git a/chromium/base/win/scoped_handle_unittest.cc b/chromium/base/win/scoped_handle_unittest.cc
new file mode 100644
index 00000000000..b573b664501
--- /dev/null
+++ b/chromium/base/win/scoped_handle_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/scoped_handle.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(ScopedHandleTest, ScopedHandle) {
+ // Any illegal error code will do. We just need to test that it is preserved
+ // by ScopedHandle to avoid bug 528394.
+ const DWORD magic_error = 0x12345678;
+
+ HANDLE handle = ::CreateMutex(nullptr, FALSE, nullptr);
+ // Call SetLastError after creating the handle.
+ ::SetLastError(magic_error);
+ base::win::ScopedHandle handle_holder(handle);
+ EXPECT_EQ(magic_error, ::GetLastError());
+
+ // Create a new handle and then set LastError again.
+ handle = ::CreateMutex(nullptr, FALSE, nullptr);
+ ::SetLastError(magic_error);
+ handle_holder.Set(handle);
+ EXPECT_EQ(magic_error, ::GetLastError());
+
+ // Create a new handle and then set LastError again.
+ handle = ::CreateMutex(nullptr, FALSE, nullptr);
+ base::win::ScopedHandle handle_source(handle);
+ ::SetLastError(magic_error);
+ handle_holder = handle_source.Pass();
+ EXPECT_EQ(magic_error, ::GetLastError());
+}
diff --git a/chromium/base/win/shortcut.cc b/chromium/base/win/shortcut.cc
index 57f8e615d1d..2dd01a83d9e 100644
--- a/chromium/base/win/shortcut.cc
+++ b/chromium/base/win/shortcut.cc
@@ -26,8 +26,10 @@ namespace win {
namespace {
// String resource IDs in shell32.dll.
-const uint32_t kPinToTaskbarID = 5386;
-const uint32_t kUnpinFromTaskbarID = 5387;
+const uint32_t kPinToTaskbarID = 5386; // Win7+
+const uint32_t kUnpinFromTaskbarID = 5387; // Win7+
+const uint32_t kPinToStartID = 51201; // Win8+
+const uint32_t kUnpinFromStartID = 51394; // Win10+
// Traits for a GenericScopedHandle that will free a module on closure.
struct ModuleTraits {
@@ -403,7 +405,7 @@ bool ResolveShortcut(const FilePath& shortcut_path,
return true;
}
-bool TaskbarPinShortcutLink(const FilePath& shortcut) {
+bool PinShortcutToTaskbar(const FilePath& shortcut) {
base::ThreadRestrictions::AssertIOAllowed();
// "Pin to taskbar" is only supported after Win7.
@@ -413,7 +415,7 @@ bool TaskbarPinShortcutLink(const FilePath& shortcut) {
return DoVerbOnFile(kPinToTaskbarID, shortcut);
}
-bool TaskbarUnpinShortcutLink(const FilePath& shortcut) {
+bool UnpinShortcutFromTaskbar(const FilePath& shortcut) {
base::ThreadRestrictions::AssertIOAllowed();
// "Unpin from taskbar" is only supported after Win7.
@@ -423,5 +425,30 @@ bool TaskbarUnpinShortcutLink(const FilePath& shortcut) {
return DoVerbOnFile(kUnpinFromTaskbarID, shortcut);
}
+bool PinShortcutToStart(const FilePath& shortcut) {
+ base::ThreadRestrictions::AssertIOAllowed();
+
+ // While "Pin to Start" is supported as of Win8, it was never used by Chrome
+ // in Win8. The behaviour on Win8 is different (new shortcut every time
+ // instead of a single pin associated with its app id) and the Start Menu
+ // shortcut itself is visible on the Start Screen whereas it is not on Win10.
+ // For simplicity's sake and per greater necessity on Win10, it is only
+ // supported in Chrome on Win10+.
+ if (GetVersion() < VERSION_WIN10)
+ return false;
+
+ return DoVerbOnFile(kPinToStartID, shortcut);
+}
+
+bool UnpinShortcutFromStart(const FilePath& shortcut) {
+ base::ThreadRestrictions::AssertIOAllowed();
+
+ // "Unpin from Start Menu" is only supported after Win10.
+ if (GetVersion() < VERSION_WIN10)
+ return false;
+
+ return DoVerbOnFile(kUnpinFromStartID, shortcut);
+}
+
} // namespace win
} // namespace base
diff --git a/chromium/base/win/shortcut.h b/chromium/base/win/shortcut.h
index 6c85f0151d8..4ff2912afeb 100644
--- a/chromium/base/win/shortcut.h
+++ b/chromium/base/win/shortcut.h
@@ -152,15 +152,29 @@ BASE_EXPORT bool ResolveShortcut(const FilePath& shortcut_path,
FilePath* target_path,
string16* args);
-// Pins a shortcut to the Windows 7 taskbar. The shortcut file must already
+// Pins a shortcut to the Windows 7+ taskbar. The |shortcut| file must already
// exist and be a shortcut that points to an executable. The app id of the
// shortcut is used to group windows and must be set correctly.
-BASE_EXPORT bool TaskbarPinShortcutLink(const FilePath& shortcut);
+BASE_EXPORT bool PinShortcutToTaskbar(const FilePath& shortcut);
-// Unpins a shortcut from the Windows 7 taskbar. The shortcut must exist and
+// Unpins a shortcut from the Windows 7+ taskbar. The |shortcut| must exist and
// already be pinned to the taskbar. The app id of the shortcut is used as the
// identifier for the taskbar item to remove and must be set correctly.
-BASE_EXPORT bool TaskbarUnpinShortcutLink(const FilePath& shortcut);
+BASE_EXPORT bool UnpinShortcutFromTaskbar(const FilePath& shortcut);
+
+// Pins a shortcut to the Windows 10+ start menu. The |shortcut| file must
+// already exist and be a shortcut that points to an executable. The app id of
+// the shortcut is used as an identifier by the shell to know that all shortcuts
+// with this app id point to this pin (i.e. "Unpin" instead of "Pin" in those
+// shortcuts' context menus). Unpinning is unnecessary on uninstall as Windows
+// handles getting rid of stale Start pins.
+BASE_EXPORT bool PinShortcutToStart(const FilePath& shortcut);
+
+// Unpins a shortcut from the Windows 10+ start menu. The |shortcut| must exist
+// and already be pinned to the start menu. The app id of the shortcut is used
+// as the identifier for the start menu item to remove and must be set
+// correctly.
+BASE_EXPORT bool UnpinShortcutFromStart(const FilePath& shortcut);
} // namespace win
} // namespace base
diff --git a/chromium/base/win/win_util.cc b/chromium/base/win/win_util.cc
index 96d646ad31f..98d451f9cc7 100644
--- a/chromium/base/win/win_util.cc
+++ b/chromium/base/win/win_util.cc
@@ -57,6 +57,44 @@ void __cdecl ForceCrashOnSigAbort(int) {
*((volatile int*)0) = 0x1337;
}
+typedef decltype(GetProcessMitigationPolicy)* GetProcessMitigationPolicyType;
+
+class LazyIsUser32AndGdi32Available {
+ public:
+ LazyIsUser32AndGdi32Available() : value_(!IsWin32kSyscallsDisabled()) {}
+
+ ~LazyIsUser32AndGdi32Available() {}
+
+ bool value() { return value_; }
+
+ private:
+ static bool IsWin32kSyscallsDisabled() {
+    // Can't disable win32k prior to Windows 8.
+ if (base::win::GetVersion() < base::win::VERSION_WIN8)
+ return false;
+
+ GetProcessMitigationPolicyType get_process_mitigation_policy_func =
+ reinterpret_cast<GetProcessMitigationPolicyType>(GetProcAddress(
+ GetModuleHandle(L"kernel32.dll"), "GetProcessMitigationPolicy"));
+
+ if (!get_process_mitigation_policy_func)
+ return false;
+
+ PROCESS_MITIGATION_SYSTEM_CALL_DISABLE_POLICY policy = {};
+ if (get_process_mitigation_policy_func(GetCurrentProcess(),
+ ProcessSystemCallDisablePolicy,
+ &policy, sizeof(policy))) {
+ return policy.DisallowWin32kSystemCalls != 0;
+ }
+
+ return false;
+ }
+
+ const bool value_;
+
+ DISALLOW_COPY_AND_ASSIGN(LazyIsUser32AndGdi32Available);
+};
+
const wchar_t kWindows8OSKRegPath[] =
L"Software\\Classes\\CLSID\\{054AAE20-4BEA-4347-8A35-64A533254A9D}"
L"\\LocalServer32";
@@ -537,5 +575,11 @@ bool MaybeHasSHA256Support() {
return true; // New enough to have SHA-256 support.
}
+bool IsUser32AndGdi32Available() {
+ static base::LazyInstance<LazyIsUser32AndGdi32Available>::Leaky available =
+ LAZY_INSTANCE_INITIALIZER;
+ return available.Get().value();
+}
+
} // namespace win
} // namespace base
diff --git a/chromium/base/win/win_util.h b/chromium/base/win/win_util.h
index 9f42e445748..3c900ff9b0a 100644
--- a/chromium/base/win/win_util.h
+++ b/chromium/base/win/win_util.h
@@ -166,6 +166,13 @@ BASE_EXPORT void SetDomainStateForTesting(bool state);
// run-time detection of this capability.
BASE_EXPORT bool MaybeHasSHA256Support();
+// Returns true if the current process can make USER32 or GDI32 calls such as
+// CreateWindow and CreateDC. Windows 8 and above allow the kernel component
+// of these calls to be disabled which can cause undefined behaviour such as
+// crashes. This function can be used to guard areas of code using these calls
+// and provide a fallback path if necessary.
+BASE_EXPORT bool IsUser32AndGdi32Available();
+
} // namespace win
} // namespace base