summaryrefslogtreecommitdiff
path: root/chromium/tools
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2017-11-20 15:06:40 +0100
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2017-11-22 11:48:58 +0000
commitdaa093eea7c773db06799a13bd7e4e2e2a9f8f14 (patch)
tree96cc5e7b9194c1b29eab927730bfa419e7111c25 /chromium/tools
parentbe59a35641616a4cf23c4a13fa0632624b021c1b (diff)
downloadqtwebengine-chromium-daa093eea7c773db06799a13bd7e4e2e2a9f8f14.tar.gz
BASELINE: Update Chromium to 63.0.3239.58
Change-Id: Ia93b322a00ba4dd4004f3bcf1254063ba90e1605
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/tools')
-rw-r--r--chromium/tools/OWNERS1
-rw-r--r--chromium/tools/accessibility/DEPS3
-rw-r--r--chromium/tools/accessibility/inspect/BUILD.gn22
-rw-r--r--chromium/tools/accessibility/inspect/README.md9
-rw-r--r--chromium/tools/accessibility/inspect/ax_dump_events.cc29
-rw-r--r--chromium/tools/accessibility/inspect/ax_event_server.cc39
-rw-r--r--chromium/tools/accessibility/inspect/ax_event_server.h26
-rwxr-xr-xchromium/tools/accessibility/rebase_dump_accessibility_tree_test.py20
-rw-r--r--chromium/tools/battor_agent/BUILD.gn2
-rw-r--r--chromium/tools/battor_agent/battor_agent.cc99
-rw-r--r--chromium/tools/battor_agent/battor_agent.h39
-rw-r--r--chromium/tools/battor_agent/battor_agent_bin.cc134
-rw-r--r--chromium/tools/battor_agent/battor_agent_unittest.cc62
-rw-r--r--chromium/tools/battor_agent/battor_connection_impl.cc44
-rw-r--r--chromium/tools/battor_agent/battor_connection_impl.h6
-rw-r--r--chromium/tools/battor_agent/battor_connection_impl_unittest.cc17
-rw-r--r--chromium/tools/battor_agent/battor_sample_converter.cc6
-rw-r--r--chromium/tools/battor_agent/battor_sample_converter.h3
-rw-r--r--chromium/tools/binary_size/README.md6
-rwxr-xr-xchromium/tools/binary_size/diagnose_bloat.py73
-rw-r--r--chromium/tools/binary_size/libsupersize/concurrent.py60
-rwxr-xr-xchromium/tools/binary_size/libsupersize/concurrent_test.py111
-rw-r--r--chromium/tools/binary_size/libsupersize/console.py55
-rw-r--r--chromium/tools/binary_size/libsupersize/describe.py413
-rwxr-xr-xchromium/tools/binary_size/libsupersize/integration_test.py13
-rwxr-xr-xchromium/tools/binary_size/libsupersize/main.py16
-rw-r--r--chromium/tools/binary_size/libsupersize/nm.py7
-rw-r--r--chromium/tools/cfi/blacklist.txt6
-rwxr-xr-xchromium/tools/checklicenses/checklicenses.py10
-rw-r--r--chromium/tools/checkteamtags/PRESUBMIT.py6
-rw-r--r--chromium/tools/chrome_extensions/chromium_code_coverage/js/app.js420
-rw-r--r--chromium/tools/chrome_extensions/chromium_code_coverage/manifest.json18
-rw-r--r--chromium/tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py25
-rw-r--r--chromium/tools/chrome_proxy/webdriver/client_config.py64
-rw-r--r--chromium/tools/chrome_proxy/webdriver/common.py75
-rw-r--r--chromium/tools/chrome_proxy/webdriver/cross_origin_push.py44
-rw-r--r--chromium/tools/chrome_proxy/webdriver/lite_page.py36
-rw-r--r--chromium/tools/chrome_proxy/webdriver/smoke.py47
-rw-r--r--chromium/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp2
-rw-r--r--chromium/tools/clang/plugins/ChromeClassTester.cpp52
-rw-r--r--chromium/tools/clang/plugins/ChromeClassTester.h15
-rw-r--r--chromium/tools/clang/plugins/FindBadConstructsConsumer.cpp223
-rw-r--r--chromium/tools/clang/plugins/FindBadConstructsConsumer.h13
-rw-r--r--chromium/tools/clang/scripts/InstructionCombining.cpp3306
-rwxr-xr-xchromium/tools/clang/scripts/package.py34
-rwxr-xr-xchromium/tools/clang/scripts/run_tool.py16
-rwxr-xr-xchromium/tools/clang/scripts/test_tool.py6
-rwxr-xr-xchromium/tools/clang/scripts/update.py60
-rw-r--r--chromium/tools/clang/traffic_annotation_extractor/traffic_annotation_extractor.cpp3
-rw-r--r--chromium/tools/cygprofile/BUILD.gn18
-rwxr-xr-xchromium/tools/cygprofile/cyglog_to_orderfile.py4
-rw-r--r--chromium/tools/cygprofile/cygprofile.cc34
-rw-r--r--chromium/tools/cygprofile/cygprofile.h5
-rw-r--r--chromium/tools/cygprofile/cygprofile_perftest.cc67
-rw-r--r--chromium/tools/cygprofile/memory_top_10_mobile_000.wprgo.sha11
-rwxr-xr-xchromium/tools/cygprofile/profile_android_startup.py83
-rw-r--r--chromium/tools/determinism/deterministic_build_whitelist.pyl1
-rw-r--r--chromium/tools/fuchsia/OWNERS1
-rwxr-xr-xchromium/tools/fuchsia/local-sdk.py16
-rwxr-xr-xchromium/tools/fuchsia/run-swarmed.py145
-rw-r--r--chromium/tools/gdb/gdb_chrome.py2
-rw-r--r--chromium/tools/gn/analyzer_unittest.cc148
-rw-r--r--chromium/tools/gn/args.cc2
-rwxr-xr-xchromium/tools/gn/bootstrap/bootstrap.py24
-rw-r--r--chromium/tools/gn/build_settings.h2
-rw-r--r--chromium/tools/gn/bundle_data.cc21
-rw-r--r--chromium/tools/gn/bundle_data.h15
-rw-r--r--chromium/tools/gn/bundle_file_rule.cc3
-rw-r--r--chromium/tools/gn/command_format.cc2
-rw-r--r--chromium/tools/gn/command_gen.cc6
-rw-r--r--chromium/tools/gn/create_bundle_target_generator.cc29
-rw-r--r--chromium/tools/gn/create_bundle_target_generator.h1
-rw-r--r--chromium/tools/gn/deps_iterator.h2
-rw-r--r--chromium/tools/gn/desc_builder.cc2
-rw-r--r--chromium/tools/gn/docs/reference.md98
-rw-r--r--chromium/tools/gn/format_test_data/062.gn10
-rw-r--r--chromium/tools/gn/format_test_data/062.golden15
-rw-r--r--chromium/tools/gn/function_toolchain.cc11
-rw-r--r--chromium/tools/gn/functions.cc2
-rw-r--r--chromium/tools/gn/functions_target.cc48
-rw-r--r--chromium/tools/gn/header_checker.cc3
-rw-r--r--chromium/tools/gn/misc/emacs/gn-mode.el2
-rw-r--r--chromium/tools/gn/ninja_create_bundle_target_writer.cc55
-rw-r--r--chromium/tools/gn/ninja_create_bundle_target_writer_unittest.cc96
-rw-r--r--chromium/tools/gn/pool.h6
-rw-r--r--chromium/tools/gn/runtime_deps_unittest.cc8
-rw-r--r--chromium/tools/gn/substitution_type.cc108
-rw-r--r--chromium/tools/gn/substitution_type.h60
-rw-r--r--chromium/tools/gn/variables.cc48
-rw-r--r--chromium/tools/gn/variables.h8
-rw-r--r--chromium/tools/gn/visual_studio_writer.cc6
-rw-r--r--chromium/tools/gn/visual_studio_writer_unittest.cc4
-rw-r--r--chromium/tools/gn/xcode_object.cc12
-rw-r--r--chromium/tools/gn/xcode_object_unittest.cc110
-rwxr-xr-xchromium/tools/grit/grit/format/data_pack.py8
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/PRESUBMIT.py29
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/__init__.py10
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/policy_template_generator.py157
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/policy_template_generator_unittest.py395
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/template_formatter.py73
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writer_configuration.py84
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/__init__.py10
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/adm_writer.py275
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/adm_writer_unittest.py1127
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/adml_writer.py183
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/adml_writer_unittest.py450
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/admx_writer.py392
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/admx_writer_unittest.py577
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer.py97
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer_unittest.py85
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/doc_writer.py758
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/doc_writer_unittest.py996
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/json_writer.py95
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/json_writer_unittest.py429
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/mock_writer.py30
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/plist_helper.py15
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer.py82
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer_unittest.py411
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/plist_writer.py161
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/plist_writer_unittest.py691
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/reg_writer.py117
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/reg_writer_unittest.py392
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/template_writer.py323
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/template_writer_unittest.py84
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/writer_unittest_common.py83
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/xml_formatted_writer.py91
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates/writers/xml_writer_base_unittest.py40
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates_json.py25
-rwxr-xr-xchromium/tools/grit/grit/format/policy_templates_json_unittest.py161
-rwxr-xr-xchromium/tools/grit/grit/grd_reader.py21
-rwxr-xr-xchromium/tools/grit/grit/grd_reader_unittest.py15
-rwxr-xr-xchromium/tools/grit/grit/node/base.py23
-rwxr-xr-xchromium/tools/grit/grit/node/include.py13
-rwxr-xr-xchromium/tools/grit/grit/node/structure.py7
-rwxr-xr-xchromium/tools/grit/grit/node/structure_unittest.py30
-rwxr-xr-xchromium/tools/grit/grit/test_suite_all.py40
-rwxr-xr-xchromium/tools/grit/grit/tool/build.py13
-rw-r--r--chromium/tools/grit/grit_rule.gni19
-rw-r--r--chromium/tools/grit/repack.gni1
-rw-r--r--chromium/tools/gritsettings/resource_ids16
-rwxr-xr-xchromium/tools/idl_parser/idl_lexer.py1
-rwxr-xr-xchromium/tools/idl_parser/idl_node.py26
-rwxr-xr-xchromium/tools/idl_parser/idl_parser.py192
-rwxr-xr-xchromium/tools/idl_parser/idl_parser_test.py7
-rw-r--r--chromium/tools/idl_parser/test_parser/dictionary_web.idl197
-rw-r--r--chromium/tools/idl_parser/test_parser/interface_web.idl213
-rw-r--r--chromium/tools/idl_parser/test_parser/namespace_web.idl150
-rw-r--r--chromium/tools/idl_parser/test_parser/typedef_web.idl6
-rw-r--r--chromium/tools/ipc_fuzzer/fuzzer/fuzzer.cc18
-rw-r--r--chromium/tools/ipc_fuzzer/message_lib/BUILD.gn3
-rw-r--r--chromium/tools/ipc_fuzzer/message_lib/all_messages.h4
-rw-r--r--chromium/tools/ipc_fuzzer/message_tools/BUILD.gn1
-rw-r--r--chromium/tools/ipc_fuzzer/message_tools/message_list.cc8
-rwxr-xr-xchromium/tools/json_comment_eater/json_comment_eater_test.py4
-rw-r--r--chromium/tools/json_schema_compiler/cc_generator.py16
-rw-r--r--chromium/tools/json_schema_compiler/cpp_bundle_generator.py49
-rw-r--r--chromium/tools/json_schema_compiler/h_generator.py2
-rw-r--r--chromium/tools/json_schema_compiler/json_features.gni14
-rw-r--r--chromium/tools/json_schema_compiler/util.cc2
-rwxr-xr-xchromium/tools/licenses.py7
-rw-r--r--chromium/tools/lldb/OWNERS4
-rw-r--r--chromium/tools/lldb/lldb_chrome.py36
-rw-r--r--chromium/tools/luci-go/linux64/isolate.sha12
-rw-r--r--chromium/tools/luci-go/mac64/isolate.sha12
-rw-r--r--chromium/tools/luci-go/win64/isolate.exe.sha12
-rwxr-xr-xchromium/tools/mb/mb.py11
-rw-r--r--chromium/tools/mb/mb_config.pyl192
-rw-r--r--chromium/tools/metrics/BUILD.gn68
-rw-r--r--chromium/tools/msan/blacklist.txt2
-rw-r--r--chromium/tools/perf/chrome_telemetry_build/BUILD.gn4
-rw-r--r--chromium/tools/perf/contrib/vr_benchmarks/BUILD.gn7
-rwxr-xr-xchromium/tools/roll_angle.py14
-rwxr-xr-xchromium/tools/roll_swiftshader.py21
-rwxr-xr-xchromium/tools/roll_webgl_conformance.py14
-rwxr-xr-xchromium/tools/roll_webrtc.py90
-rwxr-xr-xchromium/tools/safely-roll-deps.py3
-rw-r--r--chromium/tools/traffic_annotation/README.md66
-rw-r--r--chromium/tools/traffic_annotation/auditor/BUILD.gn33
-rw-r--r--chromium/tools/traffic_annotation/auditor/auditor_result.cc7
-rw-r--r--chromium/tools/traffic_annotation/auditor/auditor_result.h16
-rw-r--r--chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.cc109
-rw-r--r--chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.h20
-rw-r--r--chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_ui.cc97
-rw-r--r--chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_unittest.cc90
-rw-r--r--chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.cc223
-rw-r--r--chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.h54
-rw-r--r--chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_auditor.sha11
-rw-r--r--chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_extractor.sha11
-rw-r--r--chromium/tools/traffic_annotation/sample_traffic_annotation.cc9
-rw-r--r--chromium/tools/traffic_annotation/scripts/README.md7
-rwxr-xr-xchromium/tools/traffic_annotation/scripts/annotations_xml_downstream_caller.py47
-rwxr-xr-xchromium/tools/traffic_annotation/scripts/check_annotations.py173
-rw-r--r--chromium/tools/traffic_annotation/summary/README.md14
-rw-r--r--chromium/tools/traffic_annotation/summary/annotations.xml390
-rwxr-xr-xchromium/tools/uberblame.py556
-rw-r--r--chromium/tools/v8_context_snapshot/BUILD.gn21
-rw-r--r--chromium/tools/v8_context_snapshot/run.py15
-rw-r--r--chromium/tools/v8_context_snapshot/v8_context_snapshot.gni18
-rw-r--r--chromium/tools/valgrind/browser_wrapper_win.py49
-rwxr-xr-xchromium/tools/valgrind/drmemory.bat5
-rwxr-xr-xchromium/tools/valgrind/drmemory_analyze.py202
-rw-r--r--chromium/tools/valgrind/gtest_exclude/unit_tests.gtest_linux.txt10
-rwxr-xr-xchromium/tools/valgrind/suppressions.py5
-rwxr-xr-xchromium/tools/valgrind/test_suppressions.py4
-rw-r--r--chromium/tools/variations/fieldtrial_to_struct_unittest.py12
-rw-r--r--chromium/tools/web_dev_style/js_checker.py8
-rwxr-xr-xchromium/tools/web_dev_style/js_checker_test.py53
-rw-r--r--chromium/tools/win/DebugVisualizers/webkit.natvis12
208 files changed, 5454 insertions, 14539 deletions
diff --git a/chromium/tools/OWNERS b/chromium/tools/OWNERS
index 945ca919fe1..c18ff5774f1 100644
--- a/chromium/tools/OWNERS
+++ b/chromium/tools/OWNERS
@@ -40,7 +40,6 @@ per-file include_tracer.py=thakis@chromium.org
per-file ipc_messages_log.py=yfriedman@chromium.org
per-file licenses.py=phajdan.jr@chromium.org
-per-file licenses.py=sgurun@chromium.org
per-file licenses.py=torne@chromium.org
per-file remove_stale_pyc_files.py=dtu@chromium.org
diff --git a/chromium/tools/accessibility/DEPS b/chromium/tools/accessibility/DEPS
new file mode 100644
index 00000000000..02d3a21cd83
--- /dev/null
+++ b/chromium/tools/accessibility/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+content/browser/accessibility",
+]
diff --git a/chromium/tools/accessibility/inspect/BUILD.gn b/chromium/tools/accessibility/inspect/BUILD.gn
new file mode 100644
index 00000000000..c6e2c27f744
--- /dev/null
+++ b/chromium/tools/accessibility/inspect/BUILD.gn
@@ -0,0 +1,22 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+executable("ax_dump_events") {
+ testonly = true
+
+ sources = [
+ "ax_dump_events.cc",
+ "ax_event_server.cc",
+ ]
+
+ deps = [
+ "//base",
+ "//base/test:test_support",
+ "//content/test:test_support",
+ ]
+
+ if (is_win) {
+ libs = [ "oleacc.lib" ]
+ }
+}
diff --git a/chromium/tools/accessibility/inspect/README.md b/chromium/tools/accessibility/inspect/README.md
new file mode 100644
index 00000000000..af2ee4e969b
--- /dev/null
+++ b/chromium/tools/accessibility/inspect/README.md
@@ -0,0 +1,9 @@
+# ax_dump_events
+
+This tool helps monitor accessibility events. It currently works on Windows,
+and Mac is TBD.
+
+Events are currently dumped to the console. To use it, run
+`ax_dump_events --pid=[processid]`
+
+Press Ctrl+C to quit. \ No newline at end of file
diff --git a/chromium/tools/accessibility/inspect/ax_dump_events.cc b/chromium/tools/accessibility/inspect/ax_dump_events.cc
new file mode 100644
index 00000000000..53da75825c8
--- /dev/null
+++ b/chromium/tools/accessibility/inspect/ax_dump_events.cc
@@ -0,0 +1,29 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+
+#include "base/at_exit.h"
+#include "base/command_line.h"
+#include "base/strings/string_number_conversions.h"
+#include "tools/accessibility/inspect/ax_event_server.h"
+
+char kPidSwitch[] = "pid";
+
+int main(int argc, char** argv) {
+ base::AtExitManager at_exit_manager;
+ // TODO(aleventhal) Want callback after Ctrl+C or some global keystroke:
+ // base::AtExitManager::RegisterCallback(content::OnExit, nullptr);
+
+ base::CommandLine::Init(argc, argv);
+ std::string pid_str =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(kPidSwitch);
+ int pid = 0;
+ if (!pid_str.empty())
+ base::StringToInt(pid_str, &pid);
+
+ std::unique_ptr<content::AXEventServer> server(
+ new content::AXEventServer(pid));
+ return 0;
+}
diff --git a/chromium/tools/accessibility/inspect/ax_event_server.cc b/chromium/tools/accessibility/inspect/ax_event_server.cc
new file mode 100644
index 00000000000..30256ba6b25
--- /dev/null
+++ b/chromium/tools/accessibility/inspect/ax_event_server.cc
@@ -0,0 +1,39 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/accessibility/inspect/ax_event_server.h"
+
+#include <string>
+
+#include "base/at_exit.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/run_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/test/scoped_task_environment.h"
+
+namespace content {
+
+static void OnEvent(std::string event) {
+ printf("Event %s\n", event.c_str());
+}
+
+AXEventServer::AXEventServer(int pid)
+ : recorder_(AccessibilityEventRecorder::Create(nullptr, pid)) {
+ printf("Events for process id: %d\n", pid);
+
+ base::test::ScopedTaskEnvironment scoped_task_environment(
+ base::test::ScopedTaskEnvironment::MainThreadType::UI);
+
+ recorder_->ListenToEvents(&OnEvent);
+
+ base::RunLoop run_loop;
+ run_loop.Run();
+}
+
+AXEventServer::~AXEventServer() {
+ delete recorder_.release();
+}
+
+} // namespace content
diff --git a/chromium/tools/accessibility/inspect/ax_event_server.h b/chromium/tools/accessibility/inspect/ax_event_server.h
new file mode 100644
index 00000000000..e15f05b456e
--- /dev/null
+++ b/chromium/tools/accessibility/inspect/ax_event_server.h
@@ -0,0 +1,26 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef AX_EVENT_SERVER_H_
+#define AX_EVENT_SERVER_H_
+
+#include <string>
+
+#include "content/browser/accessibility/accessibility_event_recorder.h"
+
+namespace content {
+
+class AXEventServer {
+ public:
+ explicit AXEventServer(int pid);
+
+ ~AXEventServer();
+
+ private:
+ std::unique_ptr<AccessibilityEventRecorder> recorder_;
+};
+
+} // namespace content
+
+#endif // AX_EVENT_SERVER_H_
diff --git a/chromium/tools/accessibility/rebase_dump_accessibility_tree_test.py b/chromium/tools/accessibility/rebase_dump_accessibility_tree_test.py
index 80db0ccac78..7f3aed08c5d 100755
--- a/chromium/tools/accessibility/rebase_dump_accessibility_tree_test.py
+++ b/chromium/tools/accessibility/rebase_dump_accessibility_tree_test.py
@@ -27,10 +27,6 @@ import time
import urllib
import urlparse
-# Load BeautifulSoup. It's checked into two places in the Chromium tree.
-sys.path.append('third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/')
-from BeautifulSoup import BeautifulSoup
-
# The location of the DumpAccessibilityTree html test files and expectations.
TEST_DATA_PATH = os.path.join(os.getcwd(), 'content/test/data/accessibility')
@@ -123,24 +119,28 @@ def Run():
platform = tokens[6]
build = tokens[8]
logdog_prefix = 'chromium/bb/%s/%s/%s' % (bucket, platform, build)
- logdog_steps = '%s/+/recipes/steps/*/*/*' % logdog_prefix
+ logdog_steps = '%s/+/recipes/steps/**' % logdog_prefix
logdog_query = 'cit logdog query -results 999 -path "%s"' % logdog_steps
print (BRIGHT_COLOR + '=> %s' + NORMAL_COLOR) % logdog_query
steps = os.popen(logdog_query).readlines()
a11y_step = None
for step in steps:
- if (step.find('content_browsertests') >= 0 and
+ if (step.find('/content_browsertests') >= 0 and
step.find('with_patch') >= 0 and
step.find('trigger') == -1 and
+ step.find('swarming.summary') == -1 and
+ step.find('step_metadata') == -1 and
step.find('Upload') == -1):
+
a11y_step = step.rstrip()
+ logdog_cat = 'cit logdog cat -raw "chromium%s"' % a11y_step
+ # A bit noisy but useful for debugging.
+ # print (BRIGHT_COLOR + '=> %s' + NORMAL_COLOR) % logdog_cat
+ output = os.popen(logdog_cat).read()
+ ParseLog(output)
if not a11y_step:
print 'No content_browsertests (with patch) step found'
continue
- logdog_cat = 'cit logdog cat -raw "chromium%s"' % a11y_step
- print (BRIGHT_COLOR + '=> %s' + NORMAL_COLOR) % logdog_cat
- output = os.popen(logdog_cat).read()
- ParseLog(output)
if __name__ == '__main__':
sys.exit(Run())
diff --git a/chromium/tools/battor_agent/BUILD.gn b/chromium/tools/battor_agent/BUILD.gn
index 1a7dc67c7a5..5339b67f3ce 100644
--- a/chromium/tools/battor_agent/BUILD.gn
+++ b/chromium/tools/battor_agent/BUILD.gn
@@ -16,7 +16,6 @@ executable("battor_agent") {
"//base",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
- "//device/serial",
]
}
@@ -58,7 +57,6 @@ test("battor_agent_unittests") {
"//base",
"//base/test:run_all_unittests",
"//base/test:test_support",
- "//device/serial",
"//device/serial:test_support",
"//testing/gmock",
"//testing/gtest",
diff --git a/chromium/tools/battor_agent/battor_agent.cc b/chromium/tools/battor_agent/battor_agent.cc
index 81dd0d36e7e..7c126f948d6 100644
--- a/chromium/tools/battor_agent/battor_agent.cc
+++ b/chromium/tools/battor_agent/battor_agent.cc
@@ -3,10 +3,12 @@
// found in the LICENSE file.
#include "tools/battor_agent/battor_agent.h"
+#include <algorithm>
#include <iomanip>
+#include <vector>
#include "base/bind.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "tools/battor_agent/battor_connection_impl.h"
#include "tools/battor_agent/battor_sample_converter.h"
@@ -29,9 +31,6 @@ const uint16_t kCommandRetryDelaySeconds = 2;
// The number of seconds allowed for a control message before timing out.
const uint8_t kBattOrControlMessageTimeoutSeconds = 2;
-// The number of seconds allowed for connection to open before timing out.
-const uint8_t kBattOrConnectionTimeoutSeconds = 10;
-
// Returns true if the specified vector of bytes decodes to a message that is an
// ack for the specified control message type.
bool IsAckOfControlCommand(BattOrMessageType message_type,
@@ -105,28 +104,40 @@ bool ParseSampleFrame(BattOrMessageType type,
}
} // namespace
+BattOrResults::BattOrResults() {}
+
+BattOrResults::BattOrResults(std::string details,
+ std::vector<float> power_samples_W,
+ uint32_t sample_rate)
+ : details_(std::move(details)),
+ power_samples_W_(std::move(power_samples_W)),
+ sample_rate_(sample_rate) {}
+
+BattOrResults::BattOrResults(const BattOrResults&) = default;
+
+BattOrResults::~BattOrResults() {}
+
BattOrAgent::BattOrAgent(
const std::string& path,
Listener* listener,
scoped_refptr<base::SingleThreadTaskRunner> ui_thread_task_runner)
- : connection_(new BattOrConnectionImpl(path,
- this,
- ui_thread_task_runner)),
+ : connection_(new BattOrConnectionImpl(path, this, ui_thread_task_runner)),
+ tick_clock_(std::make_unique<base::DefaultTickClock>()),
listener_(listener),
last_action_(Action::INVALID),
command_(Command::INVALID),
num_command_attempts_(0) {
- // We don't care what thread the constructor is called on - we only care that
- // all of the other method invocations happen on the same thread.
- thread_checker_.DetachFromThread();
+ // We don't care what sequence the constructor is called on - we only care
+ // that all of the other method invocations happen on the same sequence.
+ DETACH_FROM_SEQUENCE(sequence_checker_);
}
BattOrAgent::~BattOrAgent() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
void BattOrAgent::StartTracing() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// When tracing is restarted, all previous clock sync markers are invalid.
clock_sync_markers_.clear();
@@ -137,14 +148,14 @@ void BattOrAgent::StartTracing() {
}
void BattOrAgent::StopTracing() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
command_ = Command::STOP_TRACING;
PerformAction(Action::REQUEST_CONNECTION);
}
void BattOrAgent::RecordClockSyncMarker(const std::string& marker) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
command_ = Command::RECORD_CLOCK_SYNC_MARKER;
pending_clock_sync_marker_ = marker;
@@ -152,24 +163,19 @@ void BattOrAgent::RecordClockSyncMarker(const std::string& marker) {
}
void BattOrAgent::GetFirmwareGitHash() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
command_ = Command::GET_FIRMWARE_GIT_HASH;
PerformAction(Action::REQUEST_CONNECTION);
}
void BattOrAgent::BeginConnect() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- SetActionTimeout(kBattOrConnectionTimeoutSeconds);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
connection_->Open();
}
void BattOrAgent::OnConnectionOpened(bool success) {
- // Cancel timeout because the connection was opened in time.
- timeout_callback_.Cancel();
-
if (!success) {
CompleteCommand(BATTOR_ERROR_CONNECTION_FAILED);
return;
@@ -195,7 +201,7 @@ void BattOrAgent::OnConnectionOpened(bool success) {
}
void BattOrAgent::OnBytesSent(bool success) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!success) {
CompleteCommand(BATTOR_ERROR_SEND_ERROR);
@@ -308,8 +314,9 @@ void BattOrAgent::OnMessageRead(bool success,
base::TimeTicks min_request_samples_time =
last_clock_sync_time_ + base::TimeDelta::FromMilliseconds(
kStopTracingClockSyncDelayMilliseconds);
- base::TimeDelta request_samples_delay = std::max(
- min_request_samples_time - base::TimeTicks::Now(), base::TimeDelta());
+ base::TimeDelta request_samples_delay =
+ std::max(min_request_samples_time - tick_clock_->NowTicks(),
+ base::TimeDelta());
PerformDelayedAction(Action::SEND_SAMPLES_REQUEST, request_samples_delay);
return;
@@ -366,7 +373,7 @@ void BattOrAgent::OnMessageRead(bool success,
uint32_t sample_num;
memcpy(&sample_num, bytes->data(), sizeof(uint32_t));
clock_sync_markers_[sample_num] = pending_clock_sync_marker_;
- last_clock_sync_time_ = base::TimeTicks::Now();
+ last_clock_sync_time_ = tick_clock_->NowTicks();
CompleteCommand(BATTOR_ERROR_NONE);
return;
@@ -387,7 +394,7 @@ void BattOrAgent::OnMessageRead(bool success,
}
void BattOrAgent::PerformAction(Action action) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
last_action_ = action;
@@ -477,7 +484,7 @@ void BattOrAgent::PerformAction(Action action) {
}
void BattOrAgent::PerformDelayedAction(Action action, base::TimeDelta delay) {
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&BattOrAgent::PerformAction, AsWeakPtr(), action),
delay);
}
@@ -503,7 +510,7 @@ void BattOrAgent::OnActionTimeout() {
void BattOrAgent::SendControlMessage(BattOrControlMessageType type,
uint16_t param1,
uint16_t param2) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
SetActionTimeout(kBattOrControlMessageTimeoutSeconds);
@@ -517,6 +524,10 @@ void BattOrAgent::RetryCommand() {
return;
}
+ // Restart the serial connection to guarantee that the connection gets flushed
+ // before retrying the command.
+ connection_->Close();
+
// Failed to read response to message, retry current command.
base::Callback<void()> next_command;
switch (command_) {
@@ -533,7 +544,7 @@ void BattOrAgent::RetryCommand() {
NOTREACHED();
}
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, next_command,
base::TimeDelta::FromSeconds(kCommandRetryDelaySeconds));
}
@@ -541,26 +552,26 @@ void BattOrAgent::RetryCommand() {
void BattOrAgent::CompleteCommand(BattOrError error) {
switch (command_) {
case Command::START_TRACING:
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Listener::OnStartTracingComplete,
base::Unretained(listener_), error));
break;
case Command::STOP_TRACING:
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&Listener::OnStopTracingComplete,
- base::Unretained(listener_), SamplesToString(), error));
+ base::Unretained(listener_), SamplesToResults(), error));
break;
case Command::RECORD_CLOCK_SYNC_MARKER:
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Listener::OnRecordClockSyncMarkerComplete,
base::Unretained(listener_), error));
break;
case Command::GET_FIRMWARE_GIT_HASH:
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::Bind(&Listener::OnGetFirmwareGitHashComplete,
- base::Unretained(listener_),
- firmware_git_hash_, error));
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::Bind(&Listener::OnGetFirmwareGitHashComplete,
+ base::Unretained(listener_), firmware_git_hash_, error));
break;
case Command::INVALID:
NOTREACHED();
@@ -577,9 +588,9 @@ void BattOrAgent::CompleteCommand(BattOrError error) {
num_command_attempts_ = 0;
}
-std::string BattOrAgent::SamplesToString() {
+BattOrResults BattOrAgent::SamplesToResults() {
if (calibration_frame_.empty() || samples_.empty() || !battor_eeprom_)
- return "";
+ return BattOrResults();
BattOrSampleConverter converter(*battor_eeprom_, calibration_frame_);
@@ -614,13 +625,19 @@ std::string BattOrAgent::SamplesToString() {
trace_stream << std::endl;
}
- return trace_stream.str();
+ // Convert to a vector of power in watts.
+ std::vector<float> samples(samples_.size());
+ for (size_t i = 0; i < samples_.size(); i++)
+ samples[i] = converter.ToWatts(samples_[i]);
+
+ return BattOrResults(trace_stream.str(), samples,
+ battor_eeprom_->sd_sample_rate);
}
void BattOrAgent::SetActionTimeout(uint16_t timeout_seconds) {
timeout_callback_.Reset(
base::Bind(&BattOrAgent::OnActionTimeout, AsWeakPtr()));
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, timeout_callback_.callback(),
base::TimeDelta::FromSeconds(timeout_seconds));
}
diff --git a/chromium/tools/battor_agent/battor_agent.h b/chromium/tools/battor_agent/battor_agent.h
index fa39fee1098..f0179be3d41 100644
--- a/chromium/tools/battor_agent/battor_agent.h
+++ b/chromium/tools/battor_agent/battor_agent.h
@@ -6,18 +6,43 @@
#define TOOLS_BATTOR_AGENT_BATTOR_AGENT_H_
#include <map>
+#include <vector>
#include "base/cancelable_callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_checker.h"
+#include "base/time/default_tick_clock.h"
#include "tools/battor_agent/battor_connection.h"
#include "tools/battor_agent/battor_error.h"
namespace battor {
+// A BattOrResults object contains the results of BattOr tracing, including a
+// summary and sample data in watts.
+class BattOrResults {
+ public:
+ BattOrResults();
+ BattOrResults(std::string details,
+ std::vector<float> power_samples_W,
+ uint32_t sample_rate);
+ BattOrResults(const BattOrResults&);
+ ~BattOrResults();
+
+ // Get a detailed textual representation of the data recorded.
+ const std::string& ToString() const { return details_; }
+ // Returns a vector of power samples (in watts).
+ const std::vector<float>& GetPowerSamples() const { return power_samples_W_; }
+ uint32_t GetSampleRate() const { return sample_rate_; }
+
+ private:
+ std::string details_;
+ std::vector<float> power_samples_W_;
+ uint32_t sample_rate_ = 0;
+};
+
// A BattOrAgent is a class used to asynchronously communicate with a BattOr for
// the purpose of collecting power samples. A BattOr is an external USB device
// that's capable of recording accurate, high-frequency (2000Hz) power samples.
@@ -41,7 +66,7 @@ class BattOrAgent : public BattOrConnection::Listener,
class Listener {
public:
virtual void OnStartTracingComplete(BattOrError error) = 0;
- virtual void OnStopTracingComplete(const std::string& trace,
+ virtual void OnStopTracingComplete(const BattOrResults& trace,
BattOrError error) = 0;
virtual void OnRecordClockSyncMarkerComplete(BattOrError error) = 0;
virtual void OnGetFirmwareGitHashComplete(const std::string& version,
@@ -76,6 +101,9 @@ class BattOrAgent : public BattOrConnection::Listener,
// fake in testing.
std::unique_ptr<BattOrConnection> connection_;
+ // A source of TimeTicks. Protected so that it can be faked in testing.
+ std::unique_ptr<base::TickClock> tick_clock_;
+
// Timeout for when an action isn't completed within the allotted time. This
// is virtual and protected so that timeouts can be disabled in testing. The
// testing task runner that runs delayed tasks immediately deals poorly with
@@ -141,7 +169,7 @@ class BattOrAgent : public BattOrConnection::Listener,
void CompleteCommand(BattOrError error);
// Returns a formatted version of samples_ with timestamps and real units.
- std::string SamplesToString();
+ BattOrResults SamplesToResults();
// Sets and restarts the action timeout timer.
void SetActionTimeout(uint16_t timeout_seconds);
@@ -168,9 +196,6 @@ class BattOrAgent : public BattOrConnection::Listener,
// The time at which the last clock sync marker was recorded.
base::TimeTicks last_clock_sync_time_;
- // Checker to make sure that this is only ever called on the IO thread.
- base::ThreadChecker thread_checker_;
-
// The BattOr's EEPROM (which is required for calibration).
std::unique_ptr<BattOrEEPROM> battor_eeprom_;
@@ -193,6 +218,8 @@ class BattOrAgent : public BattOrConnection::Listener,
// The git hash of the BattOr firmware.
std::string firmware_git_hash_;
+ SEQUENCE_CHECKER(sequence_checker_);
+
DISALLOW_COPY_AND_ASSIGN(BattOrAgent);
};
diff --git a/chromium/tools/battor_agent/battor_agent_bin.cc b/chromium/tools/battor_agent/battor_agent_bin.cc
index f839a16216d..a9f11c5ac65 100644
--- a/chromium/tools/battor_agent/battor_agent_bin.cc
+++ b/chromium/tools/battor_agent/battor_agent_bin.cc
@@ -34,15 +34,18 @@
#include <stdint.h>
#include <fstream>
+#include <iomanip>
#include <iostream>
#include "base/at_exit.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/files/file_path.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
+#include "base/path_service.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_tokenizer.h"
@@ -70,6 +73,7 @@ const char kUsage[] =
"\n"
"Switches: \n"
" --battor-path=<path> Uses the specified BattOr path.\n"
+ " --interactive Enables interactive power profiling."
"\n"
"Once in the shell, you can issue the following commands:\n"
"\n"
@@ -82,6 +86,10 @@ const char kUsage[] =
" Help\n"
"\n";
+// The command line switch used to enable interactive mode where starting and
+// stopping is easily toggled.
+const char kInteractiveSwitch[] = "interactive";
+
void PrintSupportsExplicitClockSync() {
std::cout << BattOrAgent::SupportsExplicitClockSync() << endl;
}
@@ -133,6 +141,13 @@ class BattOrAgentBin : public BattOrAgent::Listener {
SetUp(path);
+ if (base::CommandLine::ForCurrentProcess()->HasSwitch(kInteractiveSwitch)) {
+ interactive_ = true;
+ std::cout << "Type <Enter> to toggle tracing, type Exit or Ctrl+C "
+ "to quit, or Help for help."
+ << endl;
+ }
+
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&BattOrAgentBin::RunNextCommand, base::Unretained(this)));
@@ -178,6 +193,14 @@ class BattOrAgentBin : public BattOrAgent::Listener {
std::string cmd;
std::getline(std::cin, cmd);
+ if (interactive_) {
+ if (cmd == "") {
+ cmd = is_tracing_ ? "StopTracing" : "StartTracing";
+ std::cout << cmd << endl;
+ is_tracing_ = !is_tracing_;
+ }
+ }
+
if (cmd == "StartTracing") {
StartTracing();
} else if (cmd.find("StopTracing") != std::string::npos) {
@@ -196,6 +219,9 @@ class BattOrAgentBin : public BattOrAgent::Listener {
tokens.size() == 2 ? tokens[1] : std::string();
StopTracing(trace_output_file);
+ if (interactive_) {
+ PostRunNextCommand();
+ }
} else if (cmd == "SupportsExplicitClockSync") {
PrintSupportsExplicitClockSync();
PostRunNextCommand();
@@ -266,27 +292,106 @@ class BattOrAgentBin : public BattOrAgent::Listener {
base::Bind(&BattOrAgent::StopTracing, base::Unretained(agent_.get())));
}
- void OnStopTracingComplete(const std::string& trace,
+ std::string BattOrResultsToSummary(const BattOrResults& results) {
+ const uint32_t samples_per_second = results.GetSampleRate();
+
+ // Print a summary of a BattOr trace. These summaries are intended for human
+ // consumption and are subject to change at any moment. The summary is
+ // printed when using interactive mode.
+ std::stringstream trace_summary;
+ // Display floating-point numbers without exponents, in a five-character
+ // field, with two digits of precision. ie;
+ // 12.39
+ // 8.40
+ trace_summary << std::fixed << std::setw(5) << std::setprecision(2);
+
+ // Scan through the sample data to summarize it. Report on average power and
+ // second-by-second power including min-second, median-second, and
+ // max-second.
+ double total_power = 0.0;
+ int num_seconds = 0;
+ std::vector<double> power_by_seconds;
+ const std::vector<float>& samples = results.GetPowerSamples();
+ for (size_t i = 0; i < samples.size(); i += samples_per_second) {
+ size_t loop_count = samples.size() - i;
+ if (loop_count > samples_per_second)
+ loop_count = samples_per_second;
+
+ double second_power = 0.0;
+ for (size_t j = i; j < i + loop_count; ++j) {
+        total_power += samples[j];
+        second_power += samples[j];
+ }
+
+ // Print/store results for full seconds.
+ if (loop_count == samples_per_second) {
+ // Calculate power for one second in watts.
+ second_power /= samples_per_second;
+ trace_summary << "Second " << std::setw(2) << num_seconds
+ << " average power: " << std::setw(5) << second_power
+ << " W" << std::endl;
+ ++num_seconds;
+ power_by_seconds.push_back(second_power);
+ }
+ }
+ // Calculate average power in watts.
+ const double average_power_W = total_power / samples.size();
+ const double duration_sec =
+ static_cast<double>(samples.size()) / samples_per_second;
+ trace_summary << "Average power over " << duration_sec
+ << " s : " << average_power_W << " W" << std::endl;
+ std::sort(power_by_seconds.begin(), power_by_seconds.end());
+ if (power_by_seconds.size() >= 3) {
+ trace_summary << "Summary of power-by-seconds:" << std::endl
+ << "Minimum: " << power_by_seconds[0] << std::endl
+ << "Median: "
+ << power_by_seconds[power_by_seconds.size() / 2]
+ << std::endl
+ << "Maximum: "
+ << power_by_seconds[power_by_seconds.size() - 1]
+ << std::endl;
+ } else {
+ trace_summary << "Too short a trace to generate per-second summary.";
+ }
+
+ return trace_summary.str();
+ }
+
+ void OnStopTracingComplete(const BattOrResults& results,
BattOrError error) override {
if (error == BATTOR_ERROR_NONE) {
+ std::string output_file = trace_output_file_;
if (trace_output_file_.empty()) {
- std::cout << trace;
- } else {
- std::ofstream trace_stream(trace_output_file_);
- if (!trace_stream.is_open()) {
- std::cout << "Tracing output file could not be opened." << endl;
- exit(1);
- }
- trace_stream << trace;
- trace_stream.close();
+ // Save the detailed results in case they are needed.
+ base::FilePath default_path;
+ PathService::Get(base::DIR_USER_DESKTOP, &default_path);
+ default_path = default_path.Append(FILE_PATH_LITERAL("trace_data.txt"));
+      output_file = default_path.AsUTF8Unsafe();
+ std::cout << "Saving detailed results to " << output_file << std::endl;
}
+
+ if (interactive_) {
+ // Print a summary of the trace.
+ std::cout << BattOrResultsToSummary(results) << endl;
+ }
+
+ std::ofstream trace_stream(output_file);
+ if (!trace_stream.is_open()) {
+ std::cout << "Tracing output file \"" << output_file
+ << "\" could not be opened." << endl;
+ exit(1);
+ }
+ trace_stream << results.ToString();
+ trace_stream.close();
std::cout << "Done." << endl;
} else {
HandleError(error);
}
- ui_thread_message_loop_.task_runner()->PostTask(
- FROM_HERE, ui_thread_run_loop_.QuitClosure());
+ if (!interactive_) {
+ ui_thread_message_loop_.task_runner()->PostTask(
+ FROM_HERE, ui_thread_run_loop_.QuitClosure());
+ }
}
void RecordClockSyncMarker(const std::string& marker) {
@@ -336,6 +441,11 @@ class BattOrAgentBin : public BattOrAgent::Listener {
std::unique_ptr<BattOrAgent> agent_;
std::string trace_output_file_;
+
+ // When true user can Start/Stop tracing by typing Enter.
+ bool interactive_ = false;
+ // Toggle to support alternating starting/stopping tracing.
+ bool is_tracing_ = false;
};
} // namespace battor
diff --git a/chromium/tools/battor_agent/battor_agent_unittest.cc b/chromium/tools/battor_agent/battor_agent_unittest.cc
index d5f882b1fd7..978de54723e 100644
--- a/chromium/tools/battor_agent/battor_agent_unittest.cc
+++ b/chromium/tools/battor_agent/battor_agent_unittest.cc
@@ -6,7 +6,7 @@
#include "tools/battor_agent/battor_agent.h"
-#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_mock_time_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -81,10 +81,12 @@ class MockBattOrConnection : public BattOrConnection {
// TestableBattOrAgent uses a fake BattOrConnection to be testable.
class TestableBattOrAgent : public BattOrAgent {
public:
- TestableBattOrAgent(BattOrAgent::Listener* listener)
+ TestableBattOrAgent(BattOrAgent::Listener* listener,
+ std::unique_ptr<base::TickClock> tick_clock)
: BattOrAgent("/dev/test", listener, nullptr) {
connection_ =
std::unique_ptr<BattOrConnection>(new MockBattOrConnection(this));
+ tick_clock_ = std::move(tick_clock);
}
MockBattOrConnection* GetConnection() {
@@ -99,7 +101,7 @@ class TestableBattOrAgent : public BattOrAgent {
class BattOrAgentTest : public testing::Test, public BattOrAgent::Listener {
public:
BattOrAgentTest()
- : task_runner_(new base::TestSimpleTaskRunner()),
+ : task_runner_(new base::TestMockTimeTaskRunner()),
thread_task_runner_handle_(task_runner_) {}
void OnStartTracingComplete(BattOrError error) override {
@@ -107,11 +109,11 @@ class BattOrAgentTest : public testing::Test, public BattOrAgent::Listener {
command_error_ = error;
}
- void OnStopTracingComplete(const std::string& trace,
+ void OnStopTracingComplete(const BattOrResults& results,
BattOrError error) override {
is_command_complete_ = true;
command_error_ = error;
- trace_ = trace;
+ trace_ = results.ToString();
}
void OnRecordClockSyncMarkerComplete(BattOrError error) override {
@@ -140,7 +142,8 @@ class BattOrAgentTest : public testing::Test, public BattOrAgent::Listener {
protected:
void SetUp() override {
- agent_.reset(new TestableBattOrAgent(this));
+ agent_.reset(
+ new TestableBattOrAgent(this, task_runner_->GetMockTickClock()));
task_runner_->ClearPendingTasks();
is_command_complete_ = false;
command_error_ = BATTOR_ERROR_NONE;
@@ -244,6 +247,7 @@ class BattOrAgentTest : public testing::Test, public BattOrAgent::Listener {
if (end_state == BattOrAgentState::EEPROM_RECEIVED)
return;
+ GetTaskRunner()->FastForwardBy(base::TimeDelta::FromMilliseconds(100));
OnBytesSent(true);
if (end_state == BattOrAgentState::SAMPLES_REQUEST_SENT)
return;
@@ -310,7 +314,7 @@ class BattOrAgentTest : public testing::Test, public BattOrAgent::Listener {
TestableBattOrAgent* GetAgent() { return agent_.get(); }
- scoped_refptr<base::TestSimpleTaskRunner> GetTaskRunner() {
+ scoped_refptr<base::TestMockTimeTaskRunner> GetTaskRunner() {
return task_runner_;
}
@@ -320,7 +324,7 @@ class BattOrAgentTest : public testing::Test, public BattOrAgent::Listener {
std::string GetGitHash() { return firmware_git_hash_; }
private:
- scoped_refptr<base::TestSimpleTaskRunner> task_runner_;
+ scoped_refptr<base::TestMockTimeTaskRunner> task_runner_;
// Needed to support ThreadTaskRunnerHandle::Get() in code under test.
base::ThreadTaskRunnerHandle thread_task_runner_handle_;
@@ -540,6 +544,20 @@ TEST_F(BattOrAgentTest, StartTracingFailsAfterTooManyCumulativeFailures) {
EXPECT_EQ(BATTOR_ERROR_TOO_MANY_COMMAND_RETRIES, GetCommandError());
}
+TEST_F(BattOrAgentTest, StartTracingRestartsConnectionUponRetry) {
+ GetAgent()->StartTracing();
+ RunStartTracingTo(BattOrAgentState::INIT_SENT);
+
+ EXPECT_CALL(*GetAgent()->GetConnection(), Close());
+
+ OnMessageRead(false, BATTOR_MESSAGE_TYPE_CONTROL_ACK, nullptr);
+
+ RunStartTracingTo(BattOrAgentState::START_TRACING_COMPLETE);
+
+ EXPECT_TRUE(IsCommandComplete());
+ EXPECT_EQ(BATTOR_ERROR_NONE, GetCommandError());
+}
+
TEST_F(BattOrAgentTest, StopTracing) {
testing::InSequence s;
EXPECT_CALL(*GetAgent()->GetConnection(), Open());
@@ -885,6 +903,19 @@ TEST_F(BattOrAgentTest, StopTracingSucceedsAfterDataFrameArrivesOutOfOrder) {
EXPECT_EQ(BATTOR_ERROR_NONE, GetCommandError());
}
+TEST_F(BattOrAgentTest, StopTracingRestartsConnectionUponRetry) {
+ GetAgent()->StopTracing();
+ RunStopTracingTo(BattOrAgentState::SAMPLES_REQUEST_SENT);
+
+ EXPECT_CALL(*GetAgent()->GetConnection(), Close());
+
+ OnMessageRead(true, BATTOR_MESSAGE_TYPE_CONTROL_ACK, ToCharVector(kInitAck));
+ RunStopTracingTo(BattOrAgentState::SAMPLES_END_FRAME_RECEIVED);
+
+ EXPECT_TRUE(IsCommandComplete());
+ EXPECT_EQ(BATTOR_ERROR_NONE, GetCommandError());
+}
+
TEST_F(BattOrAgentTest, RecordClockSyncMarker) {
testing::InSequence s;
EXPECT_CALL(*GetAgent()->GetConnection(), Open());
@@ -921,6 +952,7 @@ TEST_F(BattOrAgentTest, RecordClockSyncMarkerPrintsInStopTracingResult) {
EXPECT_TRUE(IsCommandComplete());
EXPECT_EQ(BATTOR_ERROR_NONE, GetCommandError());
+ GetTaskRunner()->FastForwardBy(base::TimeDelta::FromMilliseconds(100));
GetAgent()->StopTracing();
RunStopTracingTo(BattOrAgentState::SAMPLES_REQUEST_SENT);
@@ -1037,4 +1069,18 @@ TEST_F(BattOrAgentTest, GetFirmwareGitHashSucceedsReadHasWrongType) {
EXPECT_EQ(BATTOR_ERROR_NONE, GetCommandError());
}
+TEST_F(BattOrAgentTest, GetFirmwareRestartsConnectionUponRetry) {
+ GetAgent()->GetFirmwareGitHash();
+ RunGetFirmwareGitHashTo(BattOrAgentState::GIT_FIRMWARE_HASH_REQUEST_SENT);
+
+ EXPECT_CALL(*GetAgent()->GetConnection(), Close());
+
+ OnMessageRead(false, BATTOR_MESSAGE_TYPE_CONTROL_ACK, nullptr);
+
+ RunGetFirmwareGitHashTo(BattOrAgentState::READ_GIT_HASH_RECEIVED);
+
+ EXPECT_TRUE(IsCommandComplete());
+ EXPECT_EQ(BATTOR_ERROR_NONE, GetCommandError());
+}
+
} // namespace battor
diff --git a/chromium/tools/battor_agent/battor_connection_impl.cc b/chromium/tools/battor_agent/battor_connection_impl.cc
index f1ced22c451..f4a386cd5d0 100644
--- a/chromium/tools/battor_agent/battor_connection_impl.cc
+++ b/chromium/tools/battor_agent/battor_connection_impl.cc
@@ -11,7 +11,7 @@
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
#include "base/strings/stringprintf.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/default_tick_clock.h"
#include "device/serial/buffer.h"
#include "device/serial/serial_io_handler.h"
@@ -41,6 +41,8 @@ const bool kBattOrCtsFlowControl = true;
const bool kBattOrHasCtsFlowControl = true;
// The maximum BattOr message is 50kB long.
const size_t kMaxMessageSizeBytes = 50000;
+// The number of seconds allowed for the connection to open before timing out.
+const uint8_t kConnectTimeoutSeconds = 10;
const size_t kFlushBufferSize = 50000;
// The length of time that must pass without receiving any bytes in order for a
// flush to be considered complete.
@@ -87,7 +89,12 @@ BattOrConnectionImpl::~BattOrConnectionImpl() {}
void BattOrConnectionImpl::Open() {
if (io_handler_) {
- OnOpened(true);
+ LogSerial("Serial connection already open.");
+
+ // Skip flushing the connection because it's already open.
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(&Listener::OnConnectionOpened,
+ base::Unretained(listener_), true));
return;
}
@@ -102,6 +109,7 @@ void BattOrConnectionImpl::Open() {
options.has_cts_flow_control = kBattOrHasCtsFlowControl;
LogSerial("Opening serial connection.");
+ SetTimeout(base::TimeDelta::FromSeconds(kConnectTimeoutSeconds));
io_handler_->Open(
path_, options,
base::BindOnce(&BattOrConnectionImpl::OnOpened, AsWeakPtr()));
@@ -110,10 +118,11 @@ void BattOrConnectionImpl::Open() {
void BattOrConnectionImpl::OnOpened(bool success) {
LogSerial(StringPrintf("Serial connection open finished with success: %d.",
success));
+ timeout_callback_.Cancel();
if (!success) {
Close();
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Listener::OnConnectionOpened,
base::Unretained(listener_), false));
return;
@@ -205,8 +214,7 @@ void BattOrConnectionImpl::BeginReadBytesForMessage(size_t max_bytes_to_read) {
LogSerial(StringPrintf("(message) Starting read of up to %zu bytes.",
max_bytes_to_read));
- pending_read_buffer_ =
- make_scoped_refptr(new net::IOBuffer(max_bytes_to_read));
+ pending_read_buffer_ = base::MakeRefCounted<net::IOBuffer>(max_bytes_to_read);
io_handler_->Read(base::MakeUnique<device::ReceiveBuffer>(
pending_read_buffer_, static_cast<uint32_t>(max_bytes_to_read),
@@ -290,7 +298,7 @@ void BattOrConnectionImpl::EndReadBytesForMessage(
LogSerial(StringPrintf("(message) Read finished with success: %d.", success));
pending_read_buffer_ = nullptr;
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&Listener::OnMessageRead, base::Unretained(listener_), success,
type, base::Passed(std::move(bytes))));
@@ -309,28 +317,26 @@ void BattOrConnectionImpl::BeginReadBytesForFlush() {
StringPrintf("(flush) Starting read (quiet period has lasted %f ms).",
quiet_period_duration.InMillisecondsF()));
- pending_read_buffer_ =
- make_scoped_refptr(new net::IOBuffer(kFlushBufferSize));
+ pending_read_buffer_ = base::MakeRefCounted<net::IOBuffer>(kFlushBufferSize);
io_handler_->Read(base::MakeUnique<device::ReceiveBuffer>(
pending_read_buffer_, static_cast<uint32_t>(kFlushBufferSize),
base::BindOnce(&BattOrConnectionImpl::OnBytesReadForFlush,
base::Unretained(this))));
- SetFlushReadTimeout();
+ SetTimeout(base::TimeDelta::FromMilliseconds(kFlushQuietPeriodThresholdMs));
}
-void BattOrConnectionImpl::SetFlushReadTimeout() {
- flush_timeout_callback_.Reset(
+void BattOrConnectionImpl::SetTimeout(base::TimeDelta timeout) {
+ timeout_callback_.Reset(
base::Bind(&BattOrConnectionImpl::CancelReadMessage, AsWeakPtr()));
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, flush_timeout_callback_.callback(),
- base::TimeDelta::FromMilliseconds(kFlushQuietPeriodThresholdMs));
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, timeout_callback_.callback(), timeout);
}
void BattOrConnectionImpl::OnBytesReadForFlush(
int bytes_read,
device::mojom::SerialReceiveError error) {
- flush_timeout_callback_.Cancel();
+ timeout_callback_.Cancel();
if (error != device::mojom::SerialReceiveError::NONE &&
error != device::mojom::SerialReceiveError::TIMEOUT) {
@@ -338,7 +344,7 @@ void BattOrConnectionImpl::OnBytesReadForFlush(
"(flush) Read failed due to serial read failure with error code: %d.",
static_cast<int>(error)));
pending_read_buffer_ = nullptr;
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Listener::OnConnectionOpened,
base::Unretained(listener_), false));
return;
@@ -354,7 +360,7 @@ void BattOrConnectionImpl::OnBytesReadForFlush(
base::TimeDelta::FromMilliseconds(kFlushQuietPeriodThresholdMs)) {
LogSerial("(flush) Quiet period has finished.");
pending_read_buffer_ = nullptr;
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&Listener::OnConnectionOpened,
base::Unretained(listener_), true));
return;
@@ -364,7 +370,7 @@ void BattOrConnectionImpl::OnBytesReadForFlush(
// read again after a delay.
LogSerial(StringPrintf("(flush) Reading more bytes after %u ms delay.",
kFlushQuietPeriodThresholdMs));
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ base::SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&BattOrConnectionImpl::BeginReadBytesForFlush,
AsWeakPtr()),
@@ -433,7 +439,7 @@ void BattOrConnectionImpl::OnBytesSent(int bytes_sent,
device::mojom::SerialSendError error) {
bool success = (error == device::mojom::SerialSendError::NONE) &&
(pending_write_length_ == static_cast<size_t>(bytes_sent));
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&Listener::OnBytesSent, base::Unretained(listener_), success));
}
diff --git a/chromium/tools/battor_agent/battor_connection_impl.h b/chromium/tools/battor_agent/battor_connection_impl.h
index 46a132fa0ad..3e1cf408774 100644
--- a/chromium/tools/battor_agent/battor_connection_impl.h
+++ b/chromium/tools/battor_agent/battor_connection_impl.h
@@ -83,7 +83,7 @@ class BattOrConnectionImpl
void BeginReadBytesForFlush();
void OnBytesReadForFlush(int bytes_read,
device::mojom::SerialReceiveError error);
- void SetFlushReadTimeout();
+ void SetTimeout(base::TimeDelta timeout);
// Pulls off the next complete message from already_read_buffer_, returning
// its type and contents through out parameters and any error that occurred
@@ -123,8 +123,8 @@ class BattOrConnectionImpl
// connection in order for Flush() to be considered complete.
base::TimeTicks flush_quiet_period_start_;
- // The timeout that will trigger a timeout at the end of a flush quiet period.
- base::CancelableClosure flush_timeout_callback_;
+ // The timeout for the current action.
+ base::CancelableClosure timeout_callback_;
// Threads needed for serial communication.
scoped_refptr<base::SingleThreadTaskRunner> ui_thread_task_runner_;
diff --git a/chromium/tools/battor_agent/battor_connection_impl_unittest.cc b/chromium/tools/battor_agent/battor_connection_impl_unittest.cc
index 4b37d77a0f0..98de9eedc90 100644
--- a/chromium/tools/battor_agent/battor_connection_impl_unittest.cc
+++ b/chromium/tools/battor_agent/battor_connection_impl_unittest.cc
@@ -80,6 +80,8 @@ class BattOrConnectionImplTest : public testing::Test,
task_runner_->RunUntilIdle();
}
+ void CloseConnection() { connection_->Close(); }
+
void ReadMessage(BattOrMessageType type) {
is_read_complete_ = false;
connection_->ReadMessage(type);
@@ -160,11 +162,24 @@ TEST_F(BattOrConnectionImplTest, OpenConnectionSucceedsAfterTimeout) {
ASSERT_TRUE(GetOpenSuccess());
}
+TEST_F(BattOrConnectionImplTest, OpenConnectionSucceedsImmediatelyIfOpen) {
+ OpenConnection();
+ ASSERT_FALSE(IsOpenComplete());
+
+ AdvanceTickClock(base::TimeDelta::FromMilliseconds(50));
+
+ OpenConnection();
+ ASSERT_TRUE(IsOpenComplete());
+ ASSERT_TRUE(GetOpenSuccess());
+}
+
TEST_F(BattOrConnectionImplTest, OpenConnectionFlushesIfAlreadyOpen) {
OpenConnection();
AdvanceTickClock(base::TimeDelta::FromMilliseconds(50));
SendControlMessage(BATTOR_CONTROL_MESSAGE_TYPE_RESET, 4, 7);
+
+ CloseConnection();
OpenConnection();
AdvanceTickClock(base::TimeDelta::FromMilliseconds(50));
@@ -203,6 +218,7 @@ TEST_F(BattOrConnectionImplTest, OpenConnectionFlushesAlreadyReadBuffer) {
SendBytesRaw(data, 9);
ReadMessage(BATTOR_MESSAGE_TYPE_SAMPLES);
+ CloseConnection();
OpenConnection();
AdvanceTickClock(base::TimeDelta::FromMilliseconds(50));
@@ -260,6 +276,7 @@ TEST_F(BattOrConnectionImplTest, OpenConnectionFlushesMultipleReadsOfData) {
for (int i = 0; i < 10; i++)
SendBytesRaw(data, 50000);
+ CloseConnection();
OpenConnection();
AdvanceTickClock(base::TimeDelta::FromMilliseconds(50));
diff --git a/chromium/tools/battor_agent/battor_sample_converter.cc b/chromium/tools/battor_agent/battor_sample_converter.cc
index e13afa171b0..bcee95b7bea 100644
--- a/chromium/tools/battor_agent/battor_sample_converter.cc
+++ b/chromium/tools/battor_agent/battor_sample_converter.cc
@@ -91,6 +91,12 @@ BattOrSample BattOrSampleConverter::ToSample(const RawBattOrSample& sample,
return BattOrSample{time_ms, voltage, current};
}
+float BattOrSampleConverter::ToWatts(const RawBattOrSample& raw_sample) const {
+ BattOrSample sample = ToSample(raw_sample, 0);
+
+ return sample.current_mA * sample.voltage_mV * 1e-6f;
+}
+
BattOrSample BattOrSampleConverter::MinSample() const {
// Create a minimum raw sample.
RawBattOrSample sample_raw = {kAnalogDigitalConverterMinValue,
diff --git a/chromium/tools/battor_agent/battor_sample_converter.h b/chromium/tools/battor_agent/battor_sample_converter.h
index 89e312c87b1..23915e91981 100644
--- a/chromium/tools/battor_agent/battor_sample_converter.h
+++ b/chromium/tools/battor_agent/battor_sample_converter.h
@@ -33,6 +33,9 @@ class BattOrSampleConverter {
BattOrSample ToSample(const RawBattOrSample& sample,
size_t sample_number) const;
+ // Converts a raw sample to watts.
+ float ToWatts(const RawBattOrSample& sample) const;
+
// Returns the lowest magnitude sample that the BattOr can collect.
BattOrSample MinSample() const;
diff --git a/chromium/tools/binary_size/README.md b/chromium/tools/binary_size/README.md
index cff9d133926..8e4f94caa0d 100644
--- a/chromium/tools/binary_size/README.md
+++ b/chromium/tools/binary_size/README.md
@@ -1,8 +1,8 @@
# Tools for Analyzing Chrome's Binary Size
-These tools currently focus on Android compiled with GCC. They somewhat work
-for Android + Clang, and Linux builds, but not as well. As for Windows, some
-great tools already exist and are documented here:
+These tools currently focus on Android. They somewhat work with Linux builds,
+but not as well. As for Windows, some great tools already exist and are
+documented here:
* https://www.chromium.org/developers/windows-binary-sizes
diff --git a/chromium/tools/binary_size/diagnose_bloat.py b/chromium/tools/binary_size/diagnose_bloat.py
index e1f84147be6..e265b815364 100755
--- a/chromium/tools/binary_size/diagnose_bloat.py
+++ b/chromium/tools/binary_size/diagnose_bloat.py
@@ -202,6 +202,7 @@ class _BuildHelper(object):
self.target = args.target
self.target_os = args.target_os
self.use_goma = args.use_goma
+ self.clean = args.clean
self._SetDefaults()
@property
@@ -266,7 +267,7 @@ class _BuildHelper(object):
'ffmpeg_branding="Chrome" proprietary_codecs=true')
if self.IsLinux():
self.extra_gn_args_str += (
- ' allow_posix_link_time_opt=false generate_linker_map=true')
+ ' is_cfi=false generate_linker_map=true')
self.target = self.target if self.IsAndroid() else 'chrome'
def _GenGnCmd(self):
@@ -290,6 +291,8 @@ class _BuildHelper(object):
"""Run GN gen/ninja build and return the process returncode."""
logging.info('Building %s within %s (this might take a while).',
self.target, os.path.relpath(self.output_directory))
+ if self.clean:
+ _RunCmd(['gn', 'clean', self.output_directory])
retcode = _RunCmd(
self._GenGnCmd(), verbose=True, exit_on_failure=False)[1]
if retcode:
@@ -312,13 +315,15 @@ class _BuildHelper(object):
class _BuildArchive(object):
"""Class for managing a directory with build results and build metadata."""
- def __init__(self, rev, base_archive_dir, build, subrepo, slow_options):
+ def __init__(self, rev, base_archive_dir, build, subrepo, slow_options,
+ save_unstripped):
self.build = build
self.dir = os.path.join(base_archive_dir, rev)
metadata_path = os.path.join(self.dir, 'metadata.txt')
self.rev = rev
self.metadata = _Metadata([self], build, metadata_path, subrepo)
self._slow_options = slow_options
+ self._save_unstripped = save_unstripped
def ArchiveBuildResults(self, supersize_path):
"""Save build artifacts necessary for diffing."""
@@ -329,10 +334,24 @@ class _BuildArchive(object):
self._ArchiveFile(self.build.abs_apk_path + '.mapping')
self._ArchiveResourceSizes()
self._ArchiveSizeFile(supersize_path)
+ if self._save_unstripped:
+ self._ArchiveFile(self.build.abs_main_lib_path)
self.metadata.Write()
+ assert self.Exists()
def Exists(self):
- return self.metadata.Exists()
+ ret = self.metadata.Exists() and os.path.exists(self.archived_size_path)
+ if self._save_unstripped:
+ ret = ret and os.path.exists(self.archived_unstripped_path)
+ return ret
+
+ @property
+ def archived_unstripped_path(self):
+ return os.path.join(self.dir, os.path.basename(self.build.main_lib_path))
+
+ @property
+ def archived_size_path(self):
+ return os.path.join(self.dir, self.build.size_name)
def _ArchiveResourceSizes(self):
cmd = [_RESOURCE_SIZES_PATH, self.build.abs_apk_path,'--output-dir',
@@ -352,12 +371,10 @@ class _BuildArchive(object):
existing_size_file = self.build.abs_apk_path + '.size'
if os.path.exists(existing_size_file):
logging.info('Found existing .size file')
- os.rename(
- existing_size_file, os.path.join(self.dir, self.build.size_name))
+ shutil.copy(existing_size_file, self.archived_size_path)
else:
- size_path = os.path.join(self.dir, self.build.size_name)
- supersize_cmd = [supersize_path, 'archive', size_path, '--elf-file',
- self.build.abs_main_lib_path]
+ supersize_cmd = [supersize_path, 'archive', self.archived_size_path,
+ '--elf-file', self.build.abs_main_lib_path]
if self.build.IsCloud():
supersize_cmd += ['--no-source-paths']
else:
@@ -370,11 +387,13 @@ class _BuildArchive(object):
class _DiffArchiveManager(object):
"""Class for maintaining BuildArchives and their related diff artifacts."""
- def __init__(self, revs, archive_dir, diffs, build, subrepo, slow_options):
+ def __init__(self, revs, archive_dir, diffs, build, subrepo, slow_options,
+ save_unstripped):
self.archive_dir = archive_dir
self.build = build
self.build_archives = [
- _BuildArchive(rev, archive_dir, build, subrepo, slow_options)
+ _BuildArchive(rev, archive_dir, build, subrepo, slow_options,
+ save_unstripped)
for rev in revs
]
self.diffs = diffs
@@ -407,18 +426,10 @@ class _DiffArchiveManager(object):
with open(diff_path, 'a') as diff_file:
for d in self.diffs:
d.RunDiff(diff_file, before.dir, after.dir)
- logging.info('See detailed diff results here: %s',
- os.path.relpath(diff_path))
- if len(self.build_archives) == 2:
- supersize_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
- size_paths = [os.path.join(a.dir, a.build.size_name)
- for a in self.build_archives]
- logging.info('Enter supersize console via: %s console %s %s',
- os.path.relpath(supersize_path),
- os.path.relpath(size_paths[0]),
- os.path.relpath(size_paths[1]))
metadata.Write()
self._AddDiffSummaryStat(before, after)
+ logging.info('See detailed diff results here: %s',
+ os.path.relpath(diff_path))
def Summarize(self):
if self._summary_stats:
@@ -430,11 +441,14 @@ class _DiffArchiveManager(object):
for s, before, after in stats:
_PrintAndWriteToFile(f, '{:>+10} {} {} for range: {}..{}',
s.value, s.units, s.name, before, after)
- elif self.build_archives:
+ if self.build_archives:
supersize_path = os.path.join(_BINARY_SIZE_DIR, 'supersize')
- size_path = os.path.join(self.build_archives[0].dir, self.build.size_name)
- logging.info('Enter supersize console via: %s console %s',
- os.path.relpath(supersize_path), os.path.relpath(size_path))
+ size2 = ''
+ if len(self.build_archives) > 1:
+ size2 = os.path.relpath(self.build_archives[-1].archived_size_path)
+ logging.info('Enter supersize console via: %s console %s %s',
+ os.path.relpath(supersize_path),
+ os.path.relpath(self.build_archives[0].archived_size_path), size2)
def _AddDiffSummaryStat(self, before, after):
@@ -753,6 +767,9 @@ def main():
parser.add_argument('--single',
action='store_true',
help='Sets --reference-rev=rev')
+ parser.add_argument('--unstripped',
+ action='store_true',
+ help='Save the unstripped native library when archiving.')
parser.add_argument('--depot-tools-path',
help='Custom path to depot tools. Needed for --cloud if '
'depot tools isn\'t in your PATH.')
@@ -767,7 +784,7 @@ def main():
help='Show commands executed, extra debugging output'
', and Ninja/GN output')
- build_group = parser.add_argument_group('ninja arguments')
+ build_group = parser.add_argument_group('build arguments')
build_group.add_argument('-j',
dest='max_jobs',
help='Run N jobs in parallel.')
@@ -780,6 +797,9 @@ def main():
dest='use_goma',
default=True,
help='Do not use goma when building with ninja.')
+ build_group.add_argument('--clean',
+ action='store_true',
+ help='Do a clean build for each revision.')
build_group.add_argument('--target-os',
default='android',
choices=['android', 'linux'],
@@ -830,7 +850,8 @@ def main():
ResourceSizesDiff(build.apk_name)
]
diff_mngr = _DiffArchiveManager(revs, args.archive_directory, diffs, build,
- subrepo, args.include_slow_options)
+ subrepo, args.include_slow_options,
+ args.unstripped)
consecutive_failures = 0
for i, archive in enumerate(diff_mngr.IterArchives()):
if archive.Exists():
diff --git a/chromium/tools/binary_size/libsupersize/concurrent.py b/chromium/tools/binary_size/libsupersize/concurrent.py
index 416a5016c26..0008dc78e25 100644
--- a/chromium/tools/binary_size/libsupersize/concurrent.py
+++ b/chromium/tools/binary_size/libsupersize/concurrent.py
@@ -4,6 +4,7 @@
"""Helpers related to multiprocessing."""
+import __builtin__ # __builtins__ does not have exception types.
import atexit
import logging
import multiprocessing
@@ -22,6 +23,9 @@ _all_pools = None
_is_child_process = False
_silence_exceptions = False
+# Used to pass parameters to forked processes without pickling.
+_fork_params = None
+
class _ImmediateResult(object):
def __init__(self, value):
@@ -42,8 +46,14 @@ class _ImmediateResult(object):
class _ExceptionWrapper(object):
"""Used to marshal exception messages back to main process."""
- def __init__(self, msg):
+ def __init__(self, msg, exception_type=None):
self.msg = msg
+ self.exception_type = exception_type
+
+ def MaybeThrow(self):
+ if self.exception_type:
+ raise getattr(__builtin__, self.exception_type)(
+ 'Originally caused by: ' + self.msg)
class _FuncWrapper(object):
@@ -53,13 +63,19 @@ class _FuncWrapper(object):
_is_child_process = True
self._func = func
- def __call__(self, args, _=None):
+ def __call__(self, index, _=None):
try:
- return self._func(*args)
- except: # pylint: disable=bare-except
+ return self._func(*_fork_params[index])
+ except Exception, e:
+ # Only keep the exception type for builtin exception types or else risk
+ # further marshalling exceptions.
+ exception_type = None
+ if type(e).__name__ in dir(__builtin__):
+ exception_type = type(e).__name__
# multiprocessing is supposed to catch and return exceptions automatically
# but it doesn't seem to work properly :(.
- logging.warning('CAUGHT EXCEPTION')
+ return _ExceptionWrapper(traceback.format_exc(), exception_type)
+ except: # pylint: disable=bare-except
return _ExceptionWrapper(traceback.format_exc())
@@ -127,14 +143,20 @@ def _CheckForException(value):
if isinstance(value, _ExceptionWrapper):
global _silence_exceptions
if not _silence_exceptions:
+ value.MaybeThrow()
_silence_exceptions = True
logging.error('Subprocess raised an exception:\n%s', value.msg)
sys.exit(1)
-def _MakeProcessPool(*args):
+def _MakeProcessPool(job_params):
global _all_pools
- ret = multiprocessing.Pool(*args)
+ global _fork_params
+ assert _fork_params is None
+ pool_size = min(len(job_params), multiprocessing.cpu_count())
+ _fork_params = job_params
+ ret = multiprocessing.Pool(pool_size)
+ _fork_params = None
if _all_pools is None:
_all_pools = []
atexit.register(_TerminatePools)
@@ -152,8 +174,8 @@ def ForkAndCall(func, args, decode_func=None):
pool = None
result = _ImmediateResult(func(*args))
else:
- pool = _MakeProcessPool(1)
- result = pool.apply_async(_FuncWrapper(func), (args,))
+ pool = _MakeProcessPool([args])
+ result = pool.apply_async(_FuncWrapper(func), (0,))
pool.close()
return _WrappedResult(result, pool=pool, decode_func=decode_func)
@@ -163,14 +185,18 @@ def BulkForkAndCall(func, arg_tuples):
Yields the return values as they come in.
"""
- pool_size = min(len(arg_tuples), multiprocessing.cpu_count())
+ arg_tuples = list(arg_tuples)
+ if not len(arg_tuples):
+ return
+
if DISABLE_ASYNC:
for args in arg_tuples:
yield func(*args)
return
- pool = _MakeProcessPool(pool_size)
+
+ pool = _MakeProcessPool(arg_tuples)
wrapped_func = _FuncWrapper(func)
- for result in pool.imap_unordered(wrapped_func, arg_tuples):
+ for result in pool.imap_unordered(wrapped_func, xrange(len(arg_tuples))):
_CheckForException(result)
yield result
pool.close()
@@ -198,8 +224,16 @@ def EncodeDictOfLists(d, key_transform=None):
return keys, values
-def DecodeDictOfLists(encoded_keys, encoded_values, key_transform=None):
+def JoinEncodedDictOfLists(encoded_values):
+ return ('\x01'.join(x[0] for x in encoded_values if x[0]),
+ '\x01'.join(x[1] for x in encoded_values if x[1]))
+
+
+def DecodeDictOfLists(encoded_keys_and_values, key_transform=None):
"""Deserializes a dict where values are lists of strings."""
+ encoded_keys, encoded_values = encoded_keys_and_values
+ if not encoded_keys:
+ return {}
keys = encoded_keys.split('\x01')
if key_transform:
keys = (key_transform(k) for k in keys)
diff --git a/chromium/tools/binary_size/libsupersize/concurrent_test.py b/chromium/tools/binary_size/libsupersize/concurrent_test.py
new file mode 100755
index 00000000000..01aa8cda6dc
--- /dev/null
+++ b/chromium/tools/binary_size/libsupersize/concurrent_test.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import threading
+import unittest
+
+import concurrent
+
+
+def _ForkTestHelper(test_instance, parent_pid, arg1, arg2, _=None):
+ test_instance.assertNotEquals(os.getpid(), parent_pid)
+ return arg1 + arg2
+
+
+class Unpicklable(object):
+ """Ensures that pickle() is not called on parameters."""
+ def __getstate__(self):
+ raise AssertionError('Tried to pickle')
+
+
+class ConcurrentTest(unittest.TestCase):
+ def testEncodeDictOfLists_Empty(self):
+ test_dict = {}
+ encoded = concurrent.EncodeDictOfLists(test_dict)
+ decoded = concurrent.DecodeDictOfLists(encoded)
+ self.assertEquals(test_dict, decoded)
+
+ def testEncodeDictOfLists_AllStrings(self):
+ test_dict = {'foo': ['a', 'b', 'c'], 'foo2': ['a', 'b']}
+ encoded = concurrent.EncodeDictOfLists(test_dict)
+ decoded = concurrent.DecodeDictOfLists(encoded)
+ self.assertEquals(test_dict, decoded)
+
+ def testEncodeDictOfLists_KeyTransform(self):
+ test_dict = {0: ['a', 'b', 'c'], 9: ['a', 'b']}
+ encoded = concurrent.EncodeDictOfLists(test_dict, key_transform=str)
+ decoded = concurrent.DecodeDictOfLists(encoded, key_transform=int)
+ self.assertEquals(test_dict, decoded)
+
+ def testEncodeDictOfLists_Join(self):
+ test_dict1 = {'key1': ['a']}
+ test_dict2 = {'key2': ['b']}
+ expected = {'key1': ['a'], 'key2': ['b']}
+ encoded1 = concurrent.EncodeDictOfLists(test_dict1)
+ encoded2 = concurrent.EncodeDictOfLists({})
+ encoded3 = concurrent.EncodeDictOfLists(test_dict2)
+ encoded = concurrent.JoinEncodedDictOfLists([encoded1, encoded2, encoded3])
+ decoded = concurrent.DecodeDictOfLists(encoded)
+ self.assertEquals(expected, decoded)
+
+ def testEncodeDictOfLists_Join_Empty(self):
+ test_dict1 = {}
+ test_dict2 = {}
+ expected = {}
+ encoded1 = concurrent.EncodeDictOfLists(test_dict1)
+ encoded2 = concurrent.EncodeDictOfLists(test_dict2)
+ encoded = concurrent.JoinEncodedDictOfLists([encoded1, encoded2])
+ decoded = concurrent.DecodeDictOfLists(encoded)
+ self.assertEquals(expected, decoded)
+
+ def testCallOnThread(self):
+ main_thread = threading.current_thread()
+ def callback(arg1, arg2):
+ self.assertEquals(1, arg1)
+ self.assertEquals(2, arg2)
+ my_thread = threading.current_thread()
+ self.assertNotEquals(my_thread, main_thread)
+ return 3
+
+ result = concurrent.CallOnThread(callback, 1, arg2=2)
+ self.assertEquals(3, result.get())
+
+ def testForkAndCall_normal(self):
+ parent_pid = os.getpid()
+ result = concurrent.ForkAndCall(
+ _ForkTestHelper, (self, parent_pid, 1, 2, Unpicklable()))
+ self.assertEquals(3, result.get())
+
+ def testForkAndCall_exception(self):
+ parent_pid = os.getpid()
+ result = concurrent.ForkAndCall(_ForkTestHelper, (self, parent_pid, 1, 'a'))
+ self.assertRaises(TypeError, result.get)
+
+ def testBulkForkAndCall_none(self):
+ results = concurrent.BulkForkAndCall(_ForkTestHelper, [])
+ self.assertEquals([], list(results))
+
+ def testBulkForkAndCall_few(self):
+ parent_pid = os.getpid()
+ results = concurrent.BulkForkAndCall(_ForkTestHelper, [
+ (self, parent_pid, 1, 2, Unpicklable()),
+ (self, parent_pid, 3, 4)])
+ self.assertEquals({3, 7}, set(results))
+
+ def testBulkForkAndCall_many(self):
+ parent_pid = os.getpid()
+ args = [(self, parent_pid, 1, 2, Unpicklable())] * 100
+ results = concurrent.BulkForkAndCall(_ForkTestHelper, args)
+ self.assertEquals([3] * 100, list(results))
+
+ def testBulkForkAndCall_exception(self):
+ parent_pid = os.getpid()
+ results = concurrent.BulkForkAndCall(_ForkTestHelper, [
+ (self, parent_pid, 1, 'a')])
+ self.assertRaises(TypeError, results.next)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/chromium/tools/binary_size/libsupersize/console.py b/chromium/tools/binary_size/libsupersize/console.py
index 16748aaeb5f..bd0cb151c6e 100644
--- a/chromium/tools/binary_size/libsupersize/console.py
+++ b/chromium/tools/binary_size/libsupersize/console.py
@@ -14,6 +14,7 @@ import os
import readline
import subprocess
import sys
+import types
import archive
import canned_queries
@@ -70,12 +71,14 @@ class _Session(object):
self._printed_variables = []
self._variables = {
'Print': self._PrintFunc,
+ 'Csv': self._CsvFunc,
'Diff': self._DiffFunc,
'Disassemble': self._DisassembleFunc,
'ExpandRegex': match_util.ExpandRegexIdentifierPlaceholder,
'ShowExamples': self._ShowExamplesFunc,
'canned_queries': canned_queries.CannedQueries(size_infos),
'printed': self._printed_variables,
+ 'models': models,
}
self._lazy_paths = lazy_paths
self._size_infos = size_infos
@@ -102,28 +105,53 @@ class _Session(object):
ret.symbols = ret.symbols.Sorted()
return ret
- def _PrintFunc(self, obj=None, verbose=False, recursive=False, use_pager=None,
- to_file=None):
+ def _GetObjToPrint(self, obj=None):
+ if isinstance(obj, int):
+ obj = self._printed_variables[obj]
+ elif not self._printed_variables or self._printed_variables[-1] != obj:
+ if not isinstance(obj, models.SymbolGroup) or len(obj) > 0:
+ self._printed_variables.append(obj)
+ return obj if obj is not None else self._size_infos[-1]
+
+ def _PrintFunc(self, obj=None, verbose=False, summarize=True, recursive=False,
+ use_pager=None, to_file=None):
"""Prints out the given Symbol / SymbolGroup / SizeInfo.
For convenience, |obj| will be appended to the global "printed" list.
Args:
- obj: The object to be printed. Defaults to size_infos[-1]. Also accepts an
- index into the |printed| array for showing previous results.
+ obj: The object to be printed. Defaults to |size_infos[-1]|. Also accepts
+ an index into the |_printed_variables| array for showing previous
+ results.
verbose: Show more detailed output.
+ summarize: If False, show symbols only (no headers / summaries).
recursive: Print children of nested SymbolGroups.
use_pager: Pipe output through `less`. Ignored when |obj| is a Symbol.
default is to automatically pipe when output is long.
to_file: Rather than print to stdio, write to the given file.
"""
- if isinstance(obj, int):
- obj = self._printed_variables[obj]
- elif not self._printed_variables or self._printed_variables[-1] != obj:
- if not isinstance(obj, models.SymbolGroup) or len(obj) > 0:
- self._printed_variables.append(obj)
- obj = obj if obj is not None else self._size_infos[-1]
- lines = describe.GenerateLines(obj, verbose=verbose, recursive=recursive)
+ obj = self._GetObjToPrint(obj)
+ lines = describe.GenerateLines(
+ obj, verbose=verbose, recursive=recursive, summarize=summarize,
+ format_name='text')
+ _WriteToStream(lines, use_pager=use_pager, to_file=to_file)
+
+ def _CsvFunc(self, obj=None, verbose=False, use_pager=None, to_file=None):
+ """Prints out the given Symbol / SymbolGroup / SizeInfo in CSV format.
+
+ For convenience, |obj| will be appended to the global "printed" list.
+
+ Args:
+ obj: The object to be printed as CSV. Defaults to |size_infos[-1]|. Also
+ accepts an index into the |_printed_variables| array for showing
+ previous results.
+ use_pager: Pipe output through `less`. Ignored when |obj| is a Symbol.
+ default is to automatically pipe when output is long.
+ to_file: Rather than print to stdio, write to the given file.
+ """
+ obj = self._GetObjToPrint(obj)
+ lines = describe.GenerateLines(obj, verbose=verbose, recursive=False,
+ format_name='csv')
_WriteToStream(lines, use_pager=use_pager, to_file=to_file)
def _ElfPathAndToolPrefixForSymbol(self, size_info, elf_path):
@@ -261,6 +289,9 @@ class _Session(object):
'# Show all attributes of all symbols & per-section totals:',
'Print(size_info, verbose=True)',
'',
+ '# Dump section info and all symbols in CSV format:',
+ 'Csv(size_info)',
+ '',
'# Show two levels of .text, grouped by first two subdirectories',
'text_syms = size_info.symbols.WhereInSection("t")',
'by_path = text_syms.GroupedByPath(depth=2)',
@@ -327,6 +358,8 @@ class _Session(object):
' printed: List of objects passed to Print().',
]
for key, value in self._variables.iteritems():
+ if isinstance(value, types.ModuleType):
+ continue
if key.startswith('size_info'):
lines.append(' {}: Loaded from {}'.format(key, value.size_path))
lines.append('*' * 80)
diff --git a/chromium/tools/binary_size/libsupersize/describe.py b/chromium/tools/binary_size/libsupersize/describe.py
index 494ea96de39..3879327d33c 100644
--- a/chromium/tools/binary_size/libsupersize/describe.py
+++ b/chromium/tools/binary_size/libsupersize/describe.py
@@ -3,9 +3,13 @@
# found in the LICENSE file.
"""Methods for converting model objects to human-readable formats."""
+import abc
+import cStringIO
import collections
+import csv
import datetime
import itertools
+import math
import time
import models
@@ -47,37 +51,133 @@ def _Divide(a, b):
return float(a) / b if b else 0
+def _IncludeInTotals(section_name):
+ return section_name != '.bss' and '(' not in section_name
+
+
+def _GetSectionSizeInfo(section_sizes):
+ total_bytes = sum(v for k, v in section_sizes.iteritems()
+ if _IncludeInTotals(k))
+ max_bytes = max(abs(v) for k, v in section_sizes.iteritems()
+ if _IncludeInTotals(k))
+
+ def is_relevant_section(name, size):
+ # Show all sections containing symbols, plus relocations.
+ # As a catch-all, also include any section that comprises > 4% of the
+ # largest section. Use largest section rather than total so that it still
+ # works out when showing a diff containing +100, -100 (total=0).
+ return (name in models.SECTION_TO_SECTION_NAME.values() or
+ name in ('.rela.dyn', '.rel.dyn') or
+ _IncludeInTotals(name) and abs(_Divide(size, max_bytes)) > .04)
+
+ section_names = sorted(k for k, v in section_sizes.iteritems()
+ if is_relevant_section(k, v))
+
+ return (total_bytes, section_names)
+
+
+class Histogram(object):
+ BUCKET_NAMES_FOR_SMALL_VALUES = {-1: '(-1,0)', 0: '{0}', 1: '(0,1)'}
+
+ def __init__(self):
+ self.data = collections.defaultdict(int)
+
+ # Input: (-8,-4], (-4,-2], (-2,-1], (-1,0), {0}, (0,1), [1,2), [2,4), [4,8).
+ # Output: -4, -3, -2, -1, 0, 1, 2, 3, 4.
+ @staticmethod
+ def _Bucket(v):
+ absv = abs(v)
+ if absv < 1:
+ return 0 if v == 0 else (-1 if v < 0 else 1)
+ mag = int(math.log(absv, 2.0)) + 2
+ return mag if v > 0 else -mag
+
+ @staticmethod
+ def _BucketName(k):
+ if abs(k) <= 1:
+ return Histogram.BUCKET_NAMES_FOR_SMALL_VALUES[k]
+ if k < 0:
+ return '(-{},-{}]'.format(1 << (-k - 1), 1 << (-k - 2))
+ return '[{},{})'.format(1 << (k - 2), 1 << (k - 1))
+
+ def Add(self, v):
+ self.data[self._Bucket(v)] += 1
+
+ def Generate(self):
+ keys = sorted(self.data.keys())
+ bucket_names = [self._BucketName(k) for k in keys]
+ bucket_values = [str(self.data[k]) for k in keys]
+ num_items = len(keys)
+ num_cols = 6
+ num_rows = (num_items + num_cols - 1) / num_cols # Divide and round up.
+ # Spaces needed by items in each column, to align on ':'.
+ name_col_widths = []
+ value_col_widths = []
+ for i in xrange(0, num_items, num_rows):
+ name_col_widths.append(max(len(s) for s in bucket_names[i:][:num_rows]))
+ value_col_widths.append(max(len(s) for s in bucket_values[i:][:num_rows]))
+
+ yield 'Histogram of symbols based on PSS:'
+ for r in xrange(num_rows):
+ row = zip(bucket_names[r::num_rows], name_col_widths,
+ bucket_values[r::num_rows], value_col_widths)
+ line = ' ' + ' '.join('{:>{}}: {:<{}}'.format(*t) for t in row)
+ yield line.rstrip()
+
+
class Describer(object):
- def __init__(self, verbose=False, recursive=False):
+ def __init__(self):
+ pass
+
+ @abc.abstractmethod
+ def _DescribeDeltaSizeInfo(self, diff):
+ pass
+
+ @abc.abstractmethod
+ def _DescribeSizeInfo(self, size_info):
+ pass
+
+ @abc.abstractmethod
+ def _DescribeDeltaSymbolGroup(self, delta_group):
+ pass
+
+ @abc.abstractmethod
+ def _DescribeSymbolGroup(self, group):
+ pass
+
+ @abc.abstractmethod
+ def _DescribeSymbol(self, sym, single_line=False):
+ pass
+
+ def GenerateLines(self, obj):
+ if isinstance(obj, models.DeltaSizeInfo):
+ return self._DescribeDeltaSizeInfo(obj)
+ if isinstance(obj, models.SizeInfo):
+ return self._DescribeSizeInfo(obj)
+ if isinstance(obj, models.DeltaSymbolGroup):
+ return self._DescribeDeltaSymbolGroup(obj)
+ if isinstance(obj, models.SymbolGroup):
+ return self._DescribeSymbolGroup(obj)
+ if isinstance(obj, models.Symbol) or isinstance(obj, models.DeltaSymbol):
+ return self._DescribeSymbol(obj)
+ return (repr(obj),)
+
+
+class DescriberText(Describer):
+ def __init__(self, verbose=False, recursive=False, summarize=True):
+ super(DescriberText, self).__init__()
self.verbose = verbose
self.recursive = recursive
+ self.summarize = summarize
def _DescribeSectionSizes(self, section_sizes):
- def include_in_totals(name):
- return name != '.bss' and '(' not in name
-
- total_bytes = sum(v for k, v in section_sizes.iteritems()
- if include_in_totals(k))
- max_bytes = max(abs(v) for k, v in section_sizes.iteritems()
- if include_in_totals(k))
-
- def is_relevant_section(name, size):
- # Show all sections containing symbols, plus relocations.
- # As a catch-all, also include any section that comprises > 4% of the
- # largest section. Use largest section rather than total so that it still
- # works out when showing a diff containing +100, -100 (total=0).
- return (name in models.SECTION_TO_SECTION_NAME.values() or
- name in ('.rela.dyn', '.rel.dyn') or
- include_in_totals(name) and abs(_Divide(size, max_bytes)) > .04)
-
- section_names = sorted(k for k, v in section_sizes.iteritems()
- if is_relevant_section(k, v))
+ total_bytes, section_names = _GetSectionSizeInfo(section_sizes)
yield ''
yield 'Section Sizes (Total={} ({} bytes)):'.format(
_PrettySize(total_bytes), total_bytes)
for name in section_names:
size = section_sizes[name]
- if not include_in_totals(name):
+ if not _IncludeInTotals(name):
yield ' {}: {} ({} bytes) (not included in totals)'.format(
name, _PrettySize(size), size)
else:
@@ -92,7 +192,7 @@ class Describer(object):
if k not in section_names)
for name in section_names:
not_included_part = ''
- if not include_in_totals(name):
+ if not _IncludeInTotals(name):
not_included_part = ' (not included in totals)'
yield ' {}: {} ({} bytes){}'.format(
name, _PrettySize(section_sizes[name]), section_sizes[name],
@@ -112,17 +212,15 @@ class Describer(object):
elif num_aliases[0] > 1 or self.verbose:
last_field = 'num_aliases=%d' % num_aliases[0]
+ pss_field = _FormatPss(sym.pss, sym.IsDelta())
if sym.IsDelta():
b = sum(s.before_symbol.pss_without_padding if s.before_symbol else 0
for s in sym.IterLeafSymbols())
a = sum(s.after_symbol.pss_without_padding if s.after_symbol else 0
for s in sym.IterLeafSymbols())
- pss_field = '{} ({}->{})'.format(
- _FormatPss(sym.pss, True), _FormatPss(b), _FormatPss(a))
+ pss_field = '{} ({}->{})'.format(pss_field, _FormatPss(b), _FormatPss(a))
elif sym.num_aliases > 1:
- pss_field = '{} (size={})'.format(_FormatPss(sym.pss), sym.size)
- else:
- pss_field = '{}'.format(_FormatPss(sym.pss))
+ pss_field = '{} (size={})'.format(pss_field, sym.size)
if self.verbose:
if last_field:
@@ -189,40 +287,58 @@ class Describer(object):
yield l
def _DescribeSymbolGroup(self, group):
- total_size = group.pss
- section_sizes = collections.defaultdict(float)
- for s in group.IterLeafSymbols():
- section_sizes[s.section_name] += s.pss
+ if self.summarize:
+ total_size = group.pss
+ section_sizes = collections.defaultdict(float)
+ for s in group.IterLeafSymbols():
+ section_sizes[s.section_name] += s.pss
+ histogram = Histogram()
+ for s in group:
+ histogram.Add(s.pss)
# Apply this filter after calcualating size since an alias being removed
# causes some symbols to be UNCHANGED, yet have pss != 0.
if group.IsDelta() and not self.verbose:
group = group.WhereDiffStatusIs(models.DIFF_STATUS_UNCHANGED).Inverted()
- unique_paths = set()
- for s in group.IterLeafSymbols():
- # Ignore paths like foo/{shared}/2
- if '{' not in s.object_path:
- unique_paths.add(s.object_path)
+ if self.summarize:
+ unique_paths = set()
+ for s in group.IterLeafSymbols():
+ # Ignore paths like foo/{shared}/2
+ if '{' not in s.object_path:
+ unique_paths.add(s.object_path)
- if group.IsDelta():
- unique_part = 'aliases not grouped for diffs'
+ if group.IsDelta():
+ unique_part = 'aliases not grouped for diffs'
+ else:
+ unique_part = '{:,} unique'.format(group.CountUniqueSymbols())
+
+ relevant_sections = [
+ s for s in models.SECTION_TO_SECTION_NAME.itervalues()
+ if s in section_sizes]
+ if models.SECTION_NAME_MULTIPLE in relevant_sections:
+ relevant_sections.remove(models.SECTION_NAME_MULTIPLE)
+
+ size_summary = ' '.join(
+ '{}={:<10}'.format(k, _PrettySize(int(section_sizes[k])))
+ for k in relevant_sections)
+ size_summary += ' total={:<10}'.format(_PrettySize(int(total_size)))
+
+ section_legend = ', '.join(
+ '{}={}'.format(models.SECTION_NAME_TO_SECTION[k], k)
+ for k in relevant_sections if k in models.SECTION_NAME_TO_SECTION)
+
+ summary_desc = itertools.chain(
+ ['Showing {:,} symbols ({}) with total pss: {} bytes'.format(
+ len(group), unique_part, int(total_size))],
+ histogram.Generate(),
+ [size_summary.rstrip()],
+ ['Number of unique paths: {}'.format(len(unique_paths))],
+ [''],
+ ['Section Legend: {}'.format(section_legend)],
+ )
else:
- unique_part = '{:,} unique'.format(group.CountUniqueSymbols())
-
- relevant_sections = [s for s in models.SECTION_TO_SECTION_NAME.itervalues()
- if s in section_sizes]
- if models.SECTION_NAME_MULTIPLE in relevant_sections:
- relevant_sections.remove(models.SECTION_NAME_MULTIPLE)
-
- size_summary = ' '.join(
- '{}={:<10}'.format(k, _PrettySize(int(section_sizes[k])))
- for k in relevant_sections)
- size_summary += ' total={:<10}'.format(_PrettySize(int(total_size)))
-
- section_legend = ', '.join(
- '{}={}'.format(models.SECTION_NAME_TO_SECTION[k], k)
- for k in relevant_sections if k in models.SECTION_NAME_TO_SECTION)
+ summary_desc = ()
if self.verbose:
titles = 'Index | Running Total | Section@Address | ...'
@@ -232,18 +348,10 @@ class Describer(object):
else:
titles = ('Index | Running Total | Section@Address | PSS | Path')
- header_desc = [
- 'Showing {:,} symbols ({}) with total pss: {} bytes'.format(
- len(group), unique_part, int(total_size)),
- size_summary,
- 'Number of unique paths: {}'.format(len(unique_paths)),
- '',
- 'Section Legend: {}'.format(section_legend),
- titles,
- '-' * 60
- ]
+ header_desc = (titles, '-' * 60)
+
children_desc = self._DescribeSymbolGroupChildren(group)
- return itertools.chain(header_desc, children_desc)
+ return itertools.chain(summary_desc, header_desc, children_desc)
def _DescribeDiffObjectPaths(self, delta_group):
paths_by_status = [set(), set(), set(), set()]
@@ -281,28 +389,39 @@ class Describer(object):
yield ' ' + p
def _DescribeDeltaSymbolGroup(self, delta_group):
- header_template = ('{} symbols added (+), {} changed (~), {} removed (-), '
- '{} unchanged ({})')
- unchanged_msg = '=' if self.verbose else 'not shown'
- counts = delta_group.CountsByDiffStatus()
- num_unique_before_symbols, num_unique_after_symbols = (
- delta_group.CountUniqueSymbols())
- diff_summary_desc = [
- header_template.format(
- counts[models.DIFF_STATUS_ADDED],
- counts[models.DIFF_STATUS_CHANGED],
- counts[models.DIFF_STATUS_REMOVED],
- counts[models.DIFF_STATUS_UNCHANGED],
- unchanged_msg),
- 'Number of unique symbols {} -> {} ({:+})'.format(
- num_unique_before_symbols, num_unique_after_symbols,
- num_unique_after_symbols - num_unique_before_symbols),
- ]
- path_delta_desc = self._DescribeDiffObjectPaths(delta_group)
+ if self.summarize:
+ header_template = ('{} symbols added (+), {} changed (~), '
+ '{} removed (-), {} unchanged ({})')
+ unchanged_msg = '=' if self.verbose else 'not shown'
+ # Apply this filter since an alias being removed causes some symbols to be
+ # UNCHANGED, yet have pss != 0.
+ changed_delta_group = delta_group.WhereDiffStatusIs(
+ models.DIFF_STATUS_UNCHANGED).Inverted()
+ num_inc = sum(1 for s in changed_delta_group if s.pss > 0)
+ num_dec = sum(1 for s in changed_delta_group if s.pss < 0)
+ counts = delta_group.CountsByDiffStatus()
+ num_unique_before_symbols, num_unique_after_symbols = (
+ delta_group.CountUniqueSymbols())
+ diff_summary_desc = [
+ header_template.format(
+ counts[models.DIFF_STATUS_ADDED],
+ counts[models.DIFF_STATUS_CHANGED],
+ counts[models.DIFF_STATUS_REMOVED],
+ counts[models.DIFF_STATUS_UNCHANGED],
+ unchanged_msg),
+ 'Of changed symbols, {} grew, {} shrank'.format(num_inc, num_dec),
+ 'Number of unique symbols {} -> {} ({:+})'.format(
+ num_unique_before_symbols, num_unique_after_symbols,
+ num_unique_after_symbols - num_unique_before_symbols),
+ ]
+ path_delta_desc = itertools.chain(
+ self._DescribeDiffObjectPaths(delta_group), ('',))
+ else:
+ diff_summary_desc = ()
+ path_delta_desc = ()
group_desc = self._DescribeSymbolGroup(delta_group)
- return itertools.chain(diff_summary_desc, path_delta_desc, ('',),
- group_desc)
+ return itertools.chain(diff_summary_desc, path_delta_desc, group_desc)
def _DescribeDeltaSizeInfo(self, diff):
common_metadata = {k: v for k, v in diff.before_metadata.iteritems()
@@ -335,20 +454,6 @@ class Describer(object):
return itertools.chain(metadata_desc, section_desc, coverage_desc, ('',),
group_desc)
- def GenerateLines(self, obj):
- if isinstance(obj, models.DeltaSizeInfo):
- return self._DescribeDeltaSizeInfo(obj)
- if isinstance(obj, models.SizeInfo):
- return self._DescribeSizeInfo(obj)
- if isinstance(obj, models.DeltaSymbolGroup):
- return self._DescribeDeltaSymbolGroup(obj)
- if isinstance(obj, models.SymbolGroup):
- return self._DescribeSymbolGroup(obj)
- if isinstance(obj, models.Symbol):
- return self._DescribeSymbol(obj)
- return (repr(obj),)
-
-
def DescribeSizeInfoCoverage(size_info):
"""Yields lines describing how accurate |size_info| is."""
for section, section_name in models.SECTION_TO_SECTION_NAME.iteritems():
@@ -394,6 +499,107 @@ def DescribeSizeInfoCoverage(size_info):
yield '* 0 symbols have shared ownership'
+class DescriberCsv(Describer):
+ def __init__(self, verbose=False):
+ super(DescriberCsv, self).__init__()
+ self.verbose = verbose
+ self.stringio = cStringIO.StringIO()
+ self.csv_writer = csv.writer(self.stringio)
+
+ def _RenderCsv(self, data):
+ self.stringio.truncate(0)
+ self.csv_writer.writerow(data)
+ return self.stringio.getvalue().rstrip()
+
+ def _DescribeSectionSizes(self, section_sizes):
+ relevant_section_names = _GetSectionSizeInfo(section_sizes)[1]
+
+ if self.verbose:
+ relevant_set = set(relevant_section_names)
+ section_names = sorted(section_sizes.iterkeys())
+ yield self._RenderCsv(['Name', 'Size', 'IsRelevant'])
+ for name in section_names:
+ size = section_sizes[name]
+ yield self._RenderCsv([name, size, int(name in relevant_set)])
+ else:
+ yield self._RenderCsv(['Name', 'Size'])
+ for name in relevant_section_names:
+ size = section_sizes[name]
+ yield self._RenderCsv([name, size])
+
+ def _DescribeDeltaSizeInfo(self, diff):
+ section_desc = self._DescribeSectionSizes(diff.section_sizes)
+ group_desc = self.GenerateLines(diff.symbols)
+ return itertools.chain(section_desc, ('',), group_desc)
+
+ def _DescribeSizeInfo(self, size_info):
+ section_desc = self._DescribeSectionSizes(size_info.section_sizes)
+ group_desc = self.GenerateLines(size_info.symbols)
+ return itertools.chain(section_desc, ('',), group_desc)
+
+ def _DescribeDeltaSymbolGroup(self, delta_group):
+ yield self._RenderSymbolHeader(True);
+ # Apply filter to remove UNCHANGED groups.
+ if not self.verbose:
+ delta_group = delta_group.WhereDiffStatusIs(
+ models.DIFF_STATUS_UNCHANGED).Inverted()
+ for sym in delta_group:
+ yield self._RenderSymbolData(sym)
+
+ def _DescribeSymbolGroup(self, group):
+ yield self._RenderSymbolHeader(False);
+ for sym in group:
+ yield self._RenderSymbolData(sym)
+
+ def _DescribeSymbol(self, sym, single_line=False):
+ yield self._RenderSymbolHeader(sym.IsDelta());
+ yield self._RenderSymbolData(sym)
+
+ def _RenderSymbolHeader(self, isDelta):
+ fields = []
+ fields.append('GroupCount')
+ fields.append('Address')
+ fields.append('SizeWithoutPadding')
+ fields.append('Padding')
+ if isDelta:
+ fields += ['BeforeNumAliases', 'AfterNumAliases']
+ else:
+ fields.append('NumAliases')
+ fields.append('PSS')
+ fields.append('Section')
+ if self.verbose:
+ fields.append('Flags')
+ fields.append('SourcePath')
+ fields.append('ObjectPath')
+ fields.append('Name')
+ if self.verbose:
+ fields.append('FullName')
+ return self._RenderCsv(fields)
+
+ def _RenderSymbolData(self, sym):
+ data = []
+ data.append(len(sym) if sym.IsGroup() else None)
+ data.append(None if sym.IsGroup() else hex(sym.address))
+ data.append(sym.size_without_padding)
+ data.append(sym.padding)
+ if sym.IsDelta():
+ b, a = (None, None) if sym.IsGroup() else (sym.before_symbol,
+ sym.after_symbol)
+ data.append(b.num_aliases if b else None)
+ data.append(a.num_aliases if a else None)
+ else:
+ data.append(sym.num_aliases)
+ data.append(round(sym.pss, 3))
+ data.append(sym.section)
+ if self.verbose:
+ data.append(sym.FlagsString())
+ data.append(sym.source_path);
+ data.append(sym.object_path);
+ data.append(sym.name)
+ if self.verbose:
+ data.append(sym.full_name)
+ return self._RenderCsv(data)
+
def _UtcToLocal(utc):
epoch = time.mktime(utc.timetuple())
@@ -415,9 +621,16 @@ def DescribeMetadata(metadata):
return sorted('%s=%s' % t for t in display_dict.iteritems())
-def GenerateLines(obj, verbose=False, recursive=False):
+def GenerateLines(obj, verbose=False, recursive=False, summarize=True,
+ format_name='text'):
"""Returns an iterable of lines (without \n) that describes |obj|."""
- return Describer(verbose=verbose, recursive=recursive).GenerateLines(obj)
+ if format_name == 'text':
+ d = DescriberText(verbose=verbose, recursive=recursive, summarize=summarize)
+ elif format_name == 'csv':
+ d = DescriberCsv(verbose=verbose)
+ else:
+ raise ValueError('Unknown format_name \'{}\''.format(format_name));
+ return d.GenerateLines(obj)
def WriteLines(lines, func):
diff --git a/chromium/tools/binary_size/libsupersize/integration_test.py b/chromium/tools/binary_size/libsupersize/integration_test.py
index 1a814185c4d..92fa1da2cd4 100755
--- a/chromium/tools/binary_size/libsupersize/integration_test.py
+++ b/chromium/tools/binary_size/libsupersize/integration_test.py
@@ -181,6 +181,19 @@ class IntegrationTest(unittest.TestCase):
return ret
@_CompareWithGolden()
+ def test_Csv(self):
+ with tempfile.NamedTemporaryFile(suffix='.size') as size_file, \
+ tempfile.NamedTemporaryFile(suffix='.txt') as output_file:
+ file_format.SaveSizeInfo(self._CloneSizeInfo(), size_file.name)
+ query = [
+ 'Csv(size_info, to_file=%r)' % output_file.name,
+ ]
+ ret = _RunApp('console', [size_file.name, '--query', '; '.join(query)])
+ with open(output_file.name) as f:
+ ret.extend(l.rstrip() for l in f)
+ return ret
+
+ @_CompareWithGolden()
def test_Diff_NullDiff(self):
with tempfile.NamedTemporaryFile(suffix='.size') as temp_file:
file_format.SaveSizeInfo(self._CloneSizeInfo(), temp_file.name)
diff --git a/chromium/tools/binary_size/libsupersize/main.py b/chromium/tools/binary_size/libsupersize/main.py
index 3fe14dafb4e..a289ec28ebc 100755
--- a/chromium/tools/binary_size/libsupersize/main.py
+++ b/chromium/tools/binary_size/libsupersize/main.py
@@ -48,7 +48,18 @@ class _DiffAction(object):
args.output_directory = None
args.tool_prefix = None
args.inputs = [args.before, args.after]
- args.query = ('Print(Diff(), verbose=%s)' % bool(args.all))
+ args.query = '\n'.join([
+ 'd = Diff()',
+ 'sis = canned_queries.StaticInitializers(d.symbols)',
+ 'count = sis.CountsByDiffStatus()[models.DIFF_STATUS_ADDED]',
+ 'count += sis.CountsByDiffStatus()[models.DIFF_STATUS_REMOVED]',
+ 'if count > 0:',
+ ' print "Static Initializers Diff:"',
+ ' Print(sis, summarize=False)',
+ ' print',
+ ' print "Full diff:"',
+ 'Print(d, verbose=%s)' % bool(args.all),
+ ])
console.Run(args, parser)
@@ -64,7 +75,8 @@ def main():
'Starts an interactive Python console for analyzing .size files.')
actions['diff'] = (
_DiffAction(),
- 'Shorthand for console --query "Print(Diff())"')
+ 'Shorthand for console --query "Print(Diff())" (plus highlights static '
+ 'initializers in diff)')
for name, tup in actions.iteritems():
sub_parser = sub_parsers.add_parser(name, help=tup[1])
diff --git a/chromium/tools/binary_size/libsupersize/nm.py b/chromium/tools/binary_size/libsupersize/nm.py
index 5a711c96eda..cd062588982 100644
--- a/chromium/tools/binary_size/libsupersize/nm.py
+++ b/chromium/tools/binary_size/libsupersize/nm.py
@@ -109,8 +109,7 @@ def _CollectAliasesByAddressAsyncHelper(elf_path, tool_prefix):
def CollectAliasesByAddressAsync(elf_path, tool_prefix):
"""Calls CollectAliasesByAddress in a helper process. Returns a Result."""
def decode(encoded):
- return concurrent.DecodeDictOfLists(
- encoded[0], encoded[1], key_transform=int)
+ return concurrent.DecodeDictOfLists(encoded, key_transform=int)
return concurrent.ForkAndCall(
_CollectAliasesByAddressAsyncHelper, (elf_path, tool_prefix),
decode_func=decode)
@@ -193,7 +192,7 @@ class _BulkObjectFileAnalyzerWorker(object):
paths_by_name = collections.defaultdict(list)
params = list(iter_job_params())
for encoded_ret in concurrent.BulkForkAndCall(_BatchCollectNames, params):
- names_by_path = concurrent.DecodeDictOfLists(*encoded_ret)
+ names_by_path = concurrent.DecodeDictOfLists(encoded_ret)
for path, names in names_by_path.iteritems():
for name in names:
paths_by_name[name].append(path)
@@ -263,7 +262,7 @@ class _BulkObjectFileAnalyzerMaster(object):
encoded_keys_len = int(self._process.stdout.read(8), 16)
encoded_keys = self._process.stdout.read(encoded_keys_len)
encoded_values = self._process.stdout.read()
- return concurrent.DecodeDictOfLists(encoded_keys, encoded_values)
+ return concurrent.DecodeDictOfLists((encoded_keys, encoded_values))
BulkObjectFileAnalyzer = _BulkObjectFileAnalyzerMaster
diff --git a/chromium/tools/cfi/blacklist.txt b/chromium/tools/cfi/blacklist.txt
index 8241a0546bf..05cd2776444 100644
--- a/chromium/tools/cfi/blacklist.txt
+++ b/chromium/tools/cfi/blacklist.txt
@@ -1,6 +1,3 @@
-# TODO(thakis): Remove clang is past r310132, works around r309617.
-type:std::*
-
# e.g. RolloverProtectedTickClock
fun:*MutableInstance*
@@ -13,6 +10,9 @@ fun:*ThreadSpecific*
# Mesa contains several bad casts.
src:*third_party/mesa*
+# LLVM's allocator
+src:*third_party/swiftshader/third_party/llvm-subzero/include/llvm/Support/Allocator.h*
+
# Deliberate bad cast to derived class to hide functions.
type:*BlockIUnknownMethods*
type:*BlockRefType*
diff --git a/chromium/tools/checklicenses/checklicenses.py b/chromium/tools/checklicenses/checklicenses.py
index ecd2a15ee30..3c84c26f6a6 100755
--- a/chromium/tools/checklicenses/checklicenses.py
+++ b/chromium/tools/checklicenses/checklicenses.py
@@ -119,11 +119,6 @@ PATH_SPECIFIC_WHITELISTED_LICENSES = {
'UNKNOWN',
],
- # http://code.google.com/p/google-breakpad/issues/detail?id=450
- 'breakpad/src': [
- 'UNKNOWN',
- ],
-
'buildtools/third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=25980
'UNKNOWN',
@@ -186,6 +181,11 @@ PATH_SPECIFIC_WHITELISTED_LICENSES = {
'UNKNOWN',
],
+ # https://crbug.com/google-breakpad/450
+ 'third_party/breakpad/breakpad': [
+ 'UNKNOWN',
+ ],
+
# http://crbug.com/603946
# https://github.com/google/oauth2client/issues/331
# Just imports googleapiclient. Chromite is not shipped.
diff --git a/chromium/tools/checkteamtags/PRESUBMIT.py b/chromium/tools/checkteamtags/PRESUBMIT.py
index 542eb1f622f..8817ea0d92a 100644
--- a/chromium/tools/checkteamtags/PRESUBMIT.py
+++ b/chromium/tools/checkteamtags/PRESUBMIT.py
@@ -8,8 +8,6 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API.
"""
-import subprocess
-
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
@@ -26,13 +24,15 @@ def _CommonChecks(input_api, output_api):
results.extend(_RunPyLint(input_api, output_api))
return results
+
def _RunUnitTests(input_api, output_api):
"""Runs unit tests for checkteamtags."""
repo_root = input_api.change.RepositoryRoot()
checkteamtags_dir = input_api.os_path.join(repo_root, 'tools',
'checkteamtags')
test_runner = input_api.os_path.join(checkteamtags_dir, 'run_tests')
- return_code = subprocess.call(['python', test_runner])
+ return_code = input_api.subprocess.call(
+ [input_api.python_executable, test_runner])
if return_code:
message = 'Checkteamtags unit tests did not all pass.'
return [output_api.PresubmitError(message)]
diff --git a/chromium/tools/chrome_extensions/chromium_code_coverage/js/app.js b/chromium/tools/chrome_extensions/chromium_code_coverage/js/app.js
deleted file mode 100644
index 322c309857d..00000000000
--- a/chromium/tools/chrome_extensions/chromium_code_coverage/js/app.js
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-/**
- * @fileoverview Main module for the Chromium Code Coverage extension. This
- * extension adds incremental and absolute code coverage stats
- * to the deprecated Rietveld UI. Stats are added inline with
- * file names as percentage of lines covered.
- */
-
- var coverage = coverage || {};
-
-/**
- * Contains all required configuration information.
- *
- * @type {Object}
- * @const
- */
-coverage.CONFIG = {};
-
-/**
- * URLs necessary for each project. These are necessary because the Rietveld
- * sites are used by other projects as well, and is is only possible to find
- * coverage stats for the projects registered here.
- *
- * @type {Object}
- * @const
- */
-coverage.CONFIG.COVERAGE_REPORT_URLS = {
- 'Android': {
- prefix: 'https://build.chromium.org/p/tryserver.chromium.linux/builders/' +
- 'android_coverage/builds/',
- suffix: '/steps/Incremental%20coverage%20report/logs/json.output',
- botUrl: 'http://build.chromium.org/p/tryserver.chromium.linux/builders/' +
- 'android_coverage'
- },
- 'iOS': {
- prefix: 'https://uberchromegw.corp.google.com/i/internal.bling.tryserver/' +
- 'builders/coverage/builds/',
- suffix: '/steps/coverage/logs/json.output',
- botUrl: 'https://uberchromegw.corp.google.com/i/internal.bling.tryserver/' +
- 'builders/coverage'
- }
-};
-
-/**
- * URLs where Rietveld apps are served. URLs should be escaped properly so that
- * they are ready to be used in regular expressions.
- *
- * @type {Array.<string>}
- */
-coverage.CONFIG.CODE_REVIEW_URLS = [
- 'https:\\/\\/codereview\\.chromium\\.org',
- 'https:\\/\\/chromereviews\\.googleplex\\.com'
-];
-
-/**
- * String representing absolute coverage.
- *
- * @type {string}
- * @const
-*/
-coverage.ABSOLUTE_COVERAGE = 'absolute';
-
-/**
- * String representing incremental coverage.
- *
- * @type {string}
- * @const
-*/
-coverage.INCREMENTAL_COVERAGE = 'incremental';
-
-/**
- * String representing patch incremental coverage.
- *
- * @type {string}
- * @const
- */
-coverage.PATCH_COVERAGE = 'patch';
-
-/**
- * Fetches detailed coverage stats for a given patch set and injects them into
- * the code review page.
- *
- * @param {Element} patchElement Div containing a single patch set.
- * @param {string} botUrl Location of the detailed coverage bot results.
- * @param {string} projectName The name of project to which code was submitted.
- */
-coverage.injectCoverageStats = function(patchElement, botUrl, projectName) {
- var buildNumber = botUrl.split('/').pop();
- var patch = new coverage.PatchSet(projectName, buildNumber);
- patch.getCoverageData(function(patchStats) {
- coverage.updateUi(patchStats, patchElement, patch.getCoverageReportUrl());
- });
-};
-
-/**
- * Adds coverage stats to the table containing files changed for a given patch.
- *
- * @param {Object} patchStats Object containing stats for a given patch set.
- * @param {Element} patchElement Div containing a patch single set.
- * @param {string} reportUrl Location of the detailed coverage stats for this
- * patch.
- */
-coverage.updateUi = function(patchStats, patchElement, reportUrl) {
- // Add absolute and incremental coverage column headers.
- var patchSetTableBody = patchElement.getElementsByTagName('tbody')[0];
- var headerRow = patchSetTableBody.firstElementChild;
- coverage.appendElementBeforeChild(headerRow, 'th', '&Delta;Cov.', 1);
- coverage.appendElementBeforeChild(headerRow, 'th', '|Cov.|', 1);
-
- // Add absolute and incremental coverage stats for each file.
- var fileRows = patchElement.querySelectorAll('[name=patch]');
- for (var i = 0; i < fileRows.length; i++) {
- var sourceFileRow = fileRows[i];
- var fileName = sourceFileRow.children[2].textContent.trim();
-
- var incrementalPercent = null;
- var absolutePercent = null;
- if (patchStats[fileName]) {
- incrementalPercent = patchStats[fileName][coverage.INCREMENTAL_COVERAGE];
- absolutePercent = patchStats[fileName][coverage.ABSOLUTE_COVERAGE];
- }
-
- coverage.appendElementBeforeChild(
- sourceFileRow, 'td', coverage.formatPercent(incrementalPercent), 2);
-
- coverage.appendElementBeforeChild(
- sourceFileRow, 'td', coverage.formatPercent(absolutePercent), 2);
- }
- // Add the overall coverage stats for the patch.
- coverage.addPatchSummaryStats(
- patchElement, patchStats[coverage.PATCH_COVERAGE], reportUrl);
-};
-
-/**
- * Formats percent for presentation on the page.
- *
- * @param {number} coveragePercent
- * @return {string} Formatted string ready to be added to the the DOM.
- */
-coverage.formatPercent = function(coveragePercent) {
- if (!coveragePercent) {
- return '-';
- } else {
- return coveragePercent + '%';
- }
-};
-
-/**
- * Adds summary line to a patch element: "Cov. for this patch: 45%. Details".
- *
- * @param {Element} patchElement Div containing a patch single patch set.
- * @param {number} coveragePercent Incremental coverage for entire patch.
- * @param {string} coverageReportUrl Location of detailed coverage report.
- */
-coverage.addPatchSummaryStats = function(
- patchElement, coveragePercent, coverageReportUrl) {
- var summaryElement = document.createElement('div');
- var patchSummaryHtml = '&Delta;Cov. for this patch: ' +
- coverage.formatPercent(coveragePercent) + '.&nbsp;';
- var detailsHtml = '<a href="' + coverageReportUrl + '">Details</a>';
- summaryElement.innerHTML = patchSummaryHtml + ' ' + detailsHtml;
-
- // Insert the summary line immediately after the table containing the changed
- // files for the patch.
- var tableElement = patchElement.getElementsByTagName('table')[0];
- tableElement.parentNode.insertBefore(
- summaryElement, tableElement.nextSibling);
-};
-
-/**
- * Creates and prepends an element before another.
- *
- * @param {Element} parentElement The parent of the element to prepend a new
- * element to.
- * @param {string} elementType The tag name for the new element.
- * @param {string} innerHtml The value to set as the new element's innerHTML
- * @param {number} childNumber The index of the child to prepend to.
- */
-coverage.appendElementBeforeChild = function(
- parentElement, elementType, innerHtml, childNumber) {
- var newElement = document.createElement(elementType);
- newElement.innerHTML = innerHtml;
- parentElement.insertBefore(newElement, parentElement.children[childNumber]);
-};
-
-/**
- * Checks if the given URL has been registered or not.
- *
- * @param {string} botUrl The URL to be verified.
- * @return {boolean} Whether or not the provided URL was valid.
- */
-coverage.isValidBotUrl = function(botUrl) {
- if (!botUrl) {
- return false;
- }
- for (var project in coverage.CONFIG.COVERAGE_REPORT_URLS) {
- var candidateUrl = coverage.CONFIG.COVERAGE_REPORT_URLS[project]['botUrl'];
- if (botUrl.indexOf(candidateUrl) > - 1) {
- return true;
- }
- }
- return false;
-};
-
-/**
- * Returns the project name for the given bot URL. This function expects the bot
- * URL to be valid.
- *
- * @param {botUrl} botUrl
- * @return {string} The project name for the given bot URL.
- * @throws {Error} If an invalid bot URL is supplied.
- */
-coverage.getProjectNameFromBotUrl = function(botUrl) {
- if (!botUrl) {
- throw Error(botUrl + ' is an invalid bot url.');
- }
- for (var project in coverage.CONFIG.COVERAGE_REPORT_URLS) {
- var candidateUrl = coverage.CONFIG.COVERAGE_REPORT_URLS[project]['botUrl'];
- if (botUrl.indexOf(candidateUrl) > - 1) {
- return project;
- }
- }
- throw Error(botUrl + ' is not registered.');
-};
-
-
-/**
- * Finds the coverage bot URL.
- *
- * @param {Element} patchElement Div to search for bot URL.
- * @return {string} Returns the URL to the bot details page.
- */
-coverage.getValidBotUrl = function(patchElement) {
- var bots = patchElement.getElementsByClassName('build-result');
- for (var i = 0; i < bots.length; i++) {
- if (bots[i].getAttribute('status') === 'success' &&
- coverage.isValidBotUrl(bots[i].href)) {
- return bots[i].href;
- }
- }
- return null;
-};
-
-/**
- * Checks to see if the URL points to a CL review and not another page on the
- * code review site (i.e. settings).
- *
- * @param {string} url The URL to verify.
- * @return {boolean} Whether or not the URL points to a CL review.
- */
-coverage.isValidReviewUrl = function(url) {
- baseUrls = coverage.CONFIG.CODE_REVIEW_URLS.join('|');
- // Matches baseurl.com/numeric-digits and baseurl.com/numeric-digits/anything
- var re = new RegExp('(' + baseUrls + ')/[\\d]+(\\/|$)', 'i');
- return !!url.match(re);
-};
-
-/**
- * Verifies that the user is using the deprecated UI.
- *
- * @return {boolean} Whether or not the deprecated UI is being used.
- */
-coverage.isDeprecatedUi = function() {
- // The tag is present in the new UI only.
- return document.getElementsByTagName('cr-app').length == 0;
-};
-
-/**
- * Returns the newest patch set element.
- *
- * @return {Element} The main div for the last patch set.
- */
-coverage.getLastPatchElement = function() {
- var patchElement = document.querySelectorAll('div[id^="ps-"');
- return patchElement[patchElement.length - 1];
-};
-
-/**
- * Model that describes a patch set.
- *
- * @param {string} projectName The name of the project.
- * @param {string} buildNumber The build number for the bot run corresponding to
- * this patch set.
- * @constructor
- */
-coverage.PatchSet = function(projectName, buildNumber) {
- /**
- * Location of the detailed coverage JSON report.
- * @type {string}
- * @private
- */
- this.coverageReportUrl_ = this.getCoverageReportUrl(projectName, buildNumber);
-};
-
-/**
- * Returns the coverage report URL.
- *
- * @param {string} projectName The name of the project.
- * @param {string} buildNumber The build number for the bot run corresponding
- * to this patch set.
- * @return {string} The URL to the detailed coverage report.
- */
-coverage.PatchSet.prototype.getCoverageReportUrl = function(
- projectName, buildNumber) {
- if (!this.coverageReportUrl_) {
- var reportUrl = coverage.CONFIG.COVERAGE_REPORT_URLS[projectName];
- this.coverageReportUrl_ = reportUrl['prefix'] + buildNumber +
- reportUrl['suffix'];
- }
- return this.coverageReportUrl_;
-};
-
-/**
- * Returns the detailed coverage report. Caller must handle what happens
- * when the report is received. No side effects if report isn't sent.
- *
- * @param {function} success The callback to be invoked when the report is
- * received. Invoked with an object mapping file names to
- * coverage stats as the only argument.
- */
-coverage.PatchSet.prototype.getCoverageData = function(success) {
- var client = new coverage.HttpClient();
- client.get(this.coverageReportUrl_, (function(data) {
- var resultDict = JSON.parse(data);
- var coveragePercentages = this.getCoveragePercentForFiles(resultDict);
- success(coveragePercentages);
- }).bind(this));
-};
-
-/**
- * Extracts the coverage percent for each file from the coverage report.
- *
- * @param {Object} reportDict The detailed coverage report.
- * @return {Object} An object containing the coverage percent for each file and
- * the patch coverage percent.
- */
-coverage.PatchSet.prototype.getCoveragePercentForFiles = function(reportDict) {
- var fileDict = reportDict['files'];
- var coveragePercentages = {};
-
- for (var fileName in fileDict) {
- if (fileDict.hasOwnProperty(fileName)) {
- coveragePercentages[fileName] = {};
- var coverageDict = fileDict[fileName];
-
- coveragePercentages[fileName][coverage.ABSOLUTE_COVERAGE] =
- this.getCoveragePercent(coverageDict, coverage.ABSOLUTE_COVERAGE);
-
- coveragePercentages[fileName][coverage.INCREMENTAL_COVERAGE] =
- this.getCoveragePercent(coverageDict, coverage.INCREMENTAL_COVERAGE);
- }
- }
- coveragePercentages[coverage.PATCH_COVERAGE] =
- this.getCoveragePercent(reportDict[coverage.PATCH_COVERAGE],
- coverage.INCREMENTAL_COVERAGE);
- return coveragePercentages;
-};
-
-/**
- * Returns the coverage percent given the number of total and covered lines.
- *
- * @param {Object} coverageDict Object containing absolute and incremental
- * number of lines covered.
- * @param {string} coverageType Either 'incremental' or 'absolute'.
- * @return {number} The coverage percent.
- */
-coverage.PatchSet.prototype.getCoveragePercent = function(
- coverageDict, coverageType) {
- if (!coverageDict ||
- (coverageType !== coverage.INCREMENTAL_COVERAGE &&
- coverageType !== coverage.ABSOLUTE_COVERAGE) ||
- parseFloat(total) === 0) {
- return null;
- }
- var covered = coverageDict[coverageType]['covered'];
- var total = coverageDict[coverageType]['total'];
- return Math.round(
- (parseFloat(covered) / parseFloat(total)) * 100);
-};
-
-/**
- * Model describing a simple HTTP client. Only supports GET requests.
- */
-coverage.HttpClient = function() {
-};
-
-/**
- * HTTP GET that only handles successful requests.
- *
- * @param {string} url The URL to make a GET request to.
- * @param {function} success The callback invoked when the request is finished
- * successfully. Callback is invoked with response text as
- * the only argument.
- */
-coverage.HttpClient.prototype.get = function(url, success) {
- // TODO(estevenson): Handle failure when user isn't authenticated.
- var http = new XMLHttpRequest();
- http.onreadystatechange = function() {
- if (http.readyState === 4 && http.status === 200) {
- success(http.responseText);
- }
- };
-
- http.open('GET', url + '/text', true);
- http.send(null);
-};
-
-// Verifies that page might contain a patch set with a valid coverage bot.
-if (coverage.isDeprecatedUi() &&
- coverage.isValidReviewUrl(window.location.href)) {
- var patchElement = coverage.getLastPatchElement();
- var botUrl = coverage.getValidBotUrl(patchElement);
- if (botUrl) {
- var projectName = coverage.getProjectNameFromBotUrl(botUrl);
- coverage.injectCoverageStats(patchElement, botUrl, projectName);
- }
-}
diff --git a/chromium/tools/chrome_extensions/chromium_code_coverage/manifest.json b/chromium/tools/chrome_extensions/chromium_code_coverage/manifest.json
deleted file mode 100644
index b8eb440c5df..00000000000
--- a/chromium/tools/chrome_extensions/chromium_code_coverage/manifest.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "manifest_version": 2,
- "name": "Chromium code coverage: deprecated UI",
- "description": "Adds coverage stats to Rietveld.",
- "version": "1.0.0",
- "permissions": [
- "https://uberchromegw.corp.google.com/",
- "https://build.chromium.org/"
- ],
- "content_scripts": [{
- "matches": ["https://codereview.chromium.org/*",
- "https://chromereviews.googleplex.com/*"],
- "js": [
- "js/app.js"
- ],
- "run_at": "document_end"
- }]
-}
diff --git a/chromium/tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py b/chromium/tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py
index df48a6268aa..7b58bcf7850 100644
--- a/chromium/tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py
+++ b/chromium/tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py
@@ -6,6 +6,7 @@ from common.chrome_proxy_benchmark import ChromeProxyBenchmark
from integration_tests import chrome_proxy_measurements as measurements
from integration_tests import chrome_proxy_pagesets as pagesets
from telemetry import benchmark
+from telemetry import decorators
DESKTOP_PLATFORMS = ['mac', 'linux', 'win', 'chromeos']
WEBVIEW_PLATFORMS = ['android-webview', 'android-webview-instrumentation']
@@ -49,7 +50,7 @@ class ChromeProxyClientType(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.client_type.client_type'
-@benchmark.Disabled(*WEBVIEW_PLATFORMS)
+@decorators.Disabled(*WEBVIEW_PLATFORMS)
class ChromeProxyLoFi(ChromeProxyBenchmark):
tag = 'lo_fi'
test = measurements.ChromeProxyLoFi
@@ -60,7 +61,7 @@ class ChromeProxyLoFi(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.lo_fi.lo_fi'
-@benchmark.Disabled(*WEBVIEW_PLATFORMS)
+@decorators.Disabled(*WEBVIEW_PLATFORMS)
class ChromeProxyCacheLoFiDisabled(ChromeProxyBenchmark):
tag = 'cache_lo_fi_disabled'
test = measurements.ChromeProxyCacheLoFiDisabled
@@ -71,7 +72,7 @@ class ChromeProxyCacheLoFiDisabled(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.lo_fi.cache_lo_fi_disabled'
-@benchmark.Disabled(*WEBVIEW_PLATFORMS)
+@decorators.Disabled(*WEBVIEW_PLATFORMS)
class ChromeProxyCacheProxyDisabled(ChromeProxyBenchmark):
tag = 'cache_proxy_disabled'
test = measurements.ChromeProxyCacheProxyDisabled
@@ -82,7 +83,7 @@ class ChromeProxyCacheProxyDisabled(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.lo_fi.cache_proxy_disabled'
-@benchmark.Disabled(*WEBVIEW_PLATFORMS)
+@decorators.Disabled(*WEBVIEW_PLATFORMS)
class ChromeProxyLitePage(ChromeProxyBenchmark):
tag = 'lite_page'
test = measurements.ChromeProxyLitePage
@@ -143,7 +144,7 @@ class ChromeProxyHTML5Test(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.html5test.html5test'
-@benchmark.Enabled(*DESKTOP_PLATFORMS)
+@decorators.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyYouTube(ChromeProxyBenchmark):
tag = 'youtube'
test = measurements.ChromeProxyYouTube
@@ -174,7 +175,7 @@ class ChromeProxyBlockOnce(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.block_once.block_once'
-@benchmark.Disabled(*(DESKTOP_PLATFORMS + WEBVIEW_PLATFORMS))
+@decorators.Disabled(*(DESKTOP_PLATFORMS + WEBVIEW_PLATFORMS))
# Safebrowsing is enabled for Android and iOS.
class ChromeProxySafeBrowsingOn(ChromeProxyBenchmark):
tag = 'safebrowsing_on'
@@ -191,7 +192,7 @@ class ChromeProxySafeBrowsingOn(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.safebrowsing_on.safebrowsing'
-@benchmark.Enabled(*(DESKTOP_PLATFORMS + WEBVIEW_PLATFORMS))
+@decorators.Enabled(*(DESKTOP_PLATFORMS + WEBVIEW_PLATFORMS))
# Safebrowsing is switched off for Android Webview and all desktop platforms.
class ChromeProxySafeBrowsingOff(ChromeProxyBenchmark):
tag = 'safebrowsing_off'
@@ -283,7 +284,7 @@ class ChromeProxyClientConfig(ChromeProxyBenchmark):
return 'chrome_proxy_benchmark.client_config.synthetic'
-@benchmark.Enabled(*DESKTOP_PLATFORMS)
+@decorators.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoDirect(benchmark.Benchmark):
tag = 'video'
test = measurements.ChromeProxyVideoValidation
@@ -294,7 +295,7 @@ class ChromeProxyVideoDirect(benchmark.Benchmark):
return 'chrome_proxy_benchmark.video.direct'
-@benchmark.Enabled(*DESKTOP_PLATFORMS)
+@decorators.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoProxied(benchmark.Benchmark):
tag = 'video'
test = measurements.ChromeProxyVideoValidation
@@ -305,7 +306,7 @@ class ChromeProxyVideoProxied(benchmark.Benchmark):
return 'chrome_proxy_benchmark.video.proxied'
-@benchmark.Enabled(*DESKTOP_PLATFORMS)
+@decorators.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoCompare(benchmark.Benchmark):
"""Comparison of direct and proxied video fetches.
@@ -321,7 +322,7 @@ class ChromeProxyVideoCompare(benchmark.Benchmark):
def Name(cls):
return 'chrome_proxy_benchmark.video.compare'
-@benchmark.Enabled(*DESKTOP_PLATFORMS)
+@decorators.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoFrames(benchmark.Benchmark):
"""Check for video frames similar to original video."""
@@ -333,7 +334,7 @@ class ChromeProxyVideoFrames(benchmark.Benchmark):
def Name(cls):
return 'chrome_proxy_benchmark.video.frames'
-@benchmark.Enabled(*DESKTOP_PLATFORMS)
+@decorators.Enabled(*DESKTOP_PLATFORMS)
class ChromeProxyVideoAudio(benchmark.Benchmark):
"""Check that audio is similar to original video."""
diff --git a/chromium/tools/chrome_proxy/webdriver/client_config.py b/chromium/tools/chrome_proxy/webdriver/client_config.py
index 5a5ffdd2674..f8835fb1f8c 100644
--- a/chromium/tools/chrome_proxy/webdriver/client_config.py
+++ b/chromium/tools/chrome_proxy/webdriver/client_config.py
@@ -33,10 +33,11 @@ class ClientConfig(IntegrationTest):
# Ensure client config is fetched at the start of the Chrome session, and the
# variations ID is set in the request.
+ # Disabled on android because the net log is not copied yet. crbug.com/761507
@ChromeVersionEqualOrAfterM(62)
def testClientConfigVariationsHeader(self):
with TestDriver() as t:
- t.AddChromeArg('--log-net-log=chrome.netlog.json')
+ t.UseNetLog()
t.AddChromeArg('--enable-spdy-proxy-auth')
# Force set the variations ID, so they are send along with the client
# config fetch request.
@@ -44,51 +45,50 @@ class ClientConfig(IntegrationTest):
t.LoadURL('http://check.googlezip.net/test.html')
- variation_header_count = 0
+ variation_header_count = 0
- # Look for the request made to data saver client config server.
- with open('chrome.netlog.json') as data_file:
- data = json.load(data_file)
- for i in data["events"]:
- dumped_event = json.dumps(i)
- if dumped_event.find("datasaver.googleapis.com") !=-1 and\
- dumped_event.find("clientConfigs") != -1 and\
- dumped_event.find("headers") != -1 and\
- dumped_event.find("accept-encoding") != -1 and\
- dumped_event.find("x-client-data") !=-1:
- variation_header_count = variation_header_count + 1
+ # Look for the request made to data saver client config server.
+ data = t.StopAndGetNetLog()
+ for i in data["events"]:
+ dumped_event = json.dumps(i)
+ if dumped_event.find("datasaver.googleapis.com") !=-1 and\
+ dumped_event.find("clientConfigs") != -1 and\
+ dumped_event.find("headers") != -1 and\
+ dumped_event.find("accept-encoding") != -1 and\
+ dumped_event.find("x-client-data") !=-1:
+ variation_header_count = variation_header_count + 1
- # Variation IDs are set. x-client-data should be present in the request
- # headers.
- self.assertLessEqual(1, variation_header_count)
+ # Variation IDs are set. x-client-data should be present in the request
+ # headers.
+ self.assertLessEqual(1, variation_header_count)
# Ensure client config is fetched at the start of the Chrome session, and the
# variations ID is not set in the request.
+ # Disabled on android because the net log is not copied yet. crbug.com/761507
@ChromeVersionEqualOrAfterM(62)
def testClientConfigNoVariationsHeader(self):
with TestDriver() as t:
- t.AddChromeArg('--log-net-log=chrome.netlog.json')
+ t.UseNetLog()
t.AddChromeArg('--enable-spdy-proxy-auth')
t.LoadURL('http://check.googlezip.net/test.html')
- variation_header_count = 0
+ variation_header_count = 0
- # Look for the request made to data saver client config server.
- with open('chrome.netlog.json') as data_file:
- data = json.load(data_file)
- for i in data["events"]:
- dumped_event = json.dumps(i)
- if dumped_event.find("datasaver.googleapis.com") !=-1 and\
- dumped_event.find("clientConfigs") != -1 and\
- dumped_event.find("headers") != -1 and\
- dumped_event.find("accept-encoding") != -1 and\
- dumped_event.find("x-client-data") !=-1:
- variation_header_count = variation_header_count + 1
+ # Look for the request made to data saver client config server.
+ data = t.StopAndGetNetLog()
+ for i in data["events"]:
+ dumped_event = json.dumps(i)
+ if dumped_event.find("datasaver.googleapis.com") !=-1 and\
+ dumped_event.find("clientConfigs") != -1 and\
+ dumped_event.find("headers") != -1 and\
+ dumped_event.find("accept-encoding") != -1 and\
+ dumped_event.find("x-client-data") !=-1:
+ variation_header_count = variation_header_count + 1
- # Variation IDs are not set. x-client-data should not be present in the
- # request headers.
- self.assertEqual(0, variation_header_count)
+ # Variation IDs are not set. x-client-data should not be present in the
+ # request headers.
+ self.assertEqual(0, variation_header_count)
if __name__ == '__main__':
IntegrationTest.RunAllTests() \ No newline at end of file
diff --git a/chromium/tools/chrome_proxy/webdriver/common.py b/chromium/tools/chrome_proxy/webdriver/common.py
index 945ccbf5ca6..85600ccd16c 100644
--- a/chromium/tools/chrome_proxy/webdriver/common.py
+++ b/chromium/tools/chrome_proxy/webdriver/common.py
@@ -6,10 +6,13 @@ import argparse
import json
import logging
import os
+import random
import re
import socket
import shlex
+import subprocess
import sys
+import tempfile
import time
import traceback
import unittest
@@ -148,6 +151,18 @@ def GetLogger(name='common'):
logger.initialized = True
return logger
+def _RunAdbCmd(args):
+ """Runs an adb command with the given arguments.
+
+ Args:
+ args: an array of string arguments
+ """
+ proc = subprocess.Popen(['adb'] + args, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if proc.returncode:
+ raise Exception("ADB command failed. Output: %s" % (stdout + stderr))
+
class TestDriver:
"""The main driver for an integration test.
@@ -177,6 +192,7 @@ class TestDriver:
self._logger = GetLogger(name='TestDriver')
self._has_logs = False
self._control_network_connection = control_network_connection
+ self._net_log = None
self._network_connection = None
def __enter__(self):
@@ -185,6 +201,18 @@ class TestDriver:
def __exit__(self, exc_type, exc_value, tb):
if self._driver:
self._StopDriver()
+ if self._net_log and self._flags.android:
+ try:
+ _RunAdbCmd(['shell', 'rm', '-f', self._net_log])
+ except:
+ # Ignore errors, give only an attempt to rm the temp file
+ pass
+ if self._net_log and not self._flags.android:
+ try:
+ os.remove(self._net_log)
+ except:
+ # Ignore errors, give only an attempt to rm the temp file
+ pass
def _OverrideChromeArgs(self):
"""Overrides any given arguments in the code with those given on the command
@@ -324,6 +352,21 @@ class TestDriver:
'clearHostResolverCache();}')
self._logger.info('Cleared browser cache. Returned=%s', str(res))
+ def UseNetLog(self):
+ """Requests that a Chrome netlog be available for test evaluation.
+ """
+ if self._driver:
+ raise Exception("UseNetLog() must be called before LoadURL()")
+ temp_basename = "chrome.netlog.%05d.json" % random.randint(1, 100000)
+ temp_dir = tempfile.gettempdir()
+ if self._flags.android:
+ temp_dir = '/data/local/tmp'
+ temp_file = os.path.join(temp_dir, temp_basename)
+ if self._flags.android:
+ _RunAdbCmd(['shell', 'touch', temp_file])
+ self.AddChromeArg('--log-net-log=%s' % temp_file)
+ self._net_log = temp_file
+
def SetNetworkConnection(self, connection_type):
"""Changes the emulated connection type.
@@ -453,6 +496,38 @@ class TestDriver:
raise Exception('%s not true after %f seconds' % (expression, timeout))
return result
+ def StopAndGetNetLog(self):
+ """Stops the browser and returns the parsed net log.
+
+ Must be called after UseNetLog(). Will attempt to fix an unfinished netlog
+ dump if initial parse fails.
+
+ Returns: the parsed netlog dict object
+ """
+ if self._driver:
+ self._StopDriver()
+ # Give a moment for Chrome to close and finish writing the netlog.
+ if not self._net_log:
+ raise Exception('GetParsedNetLog() cannot be called before UseNetLog()')
+ temp_file = self._net_log
+ if self._flags.android:
+ temp_file = os.path.join(tempfile.gettempdir(), 'pulled_netlog.json')
+ _RunAdbCmd(['pull', self._net_log, temp_file])
+ json_file_content = ''
+ with open(temp_file) as f:
+ json_file_content = f.read()
+ try:
+ return json.loads(json_file_content)
+ except:
+ # Using --log-net-log does not guarantee a valid json file. Workaround
+ # copied from
+ # https://cs.chromium.org/chromium/src/third_party/catapult/netlog_viewer/netlog_viewer/log_util.js?l=275&rcl=017fd5cf4ccbcbed7bba20760f1b3d923a7cd3ca
+ end = max(json_file_content.rfind(',\n'), json_file_content.rfind(',\r'))
+ if end == -1:
+ raise Exception('unable to parse netlog json file')
+ json_file_content = json_file_content[:end] + ']}'
+ return json.loads(json_file_content)
+
def GetPerformanceLogs(self, method_filter=r'Network\.responseReceived'):
"""Returns all logged Performance events from Chrome. Raises an Exception if
no pages have been loaded since the last time this function was called.
diff --git a/chromium/tools/chrome_proxy/webdriver/cross_origin_push.py b/chromium/tools/chrome_proxy/webdriver/cross_origin_push.py
index 395c9434e3e..8ac614d2b82 100644
--- a/chromium/tools/chrome_proxy/webdriver/cross_origin_push.py
+++ b/chromium/tools/chrome_proxy/webdriver/cross_origin_push.py
@@ -11,10 +11,11 @@ import json
class CrossOriginPush(IntegrationTest):
# Ensure cross origin push from trusted proxy server is adopted by Chromium.
+ # Disabled on android because the net log is not copied yet. crbug.com/761507
@ChromeVersionEqualOrAfterM(62)
def testClientConfigVariationsHeader(self):
with TestDriver() as t:
- t.AddChromeArg('--log-net-log=chrome.netlog.json')
+ t.UseNetLog()
t.AddChromeArg('--enable-spdy-proxy-auth')
t.AddChromeArg(
'--force-fieldtrial-params=DataReductionProxyServerExperiments'
@@ -26,32 +27,31 @@ class CrossOriginPush(IntegrationTest):
t.LoadURL('http://googleweblight.com/i?u='
'http://check.googlezip.net/test.html')
- promised_stream_count = 0
- adopted_stream_count = 0
+ promised_stream_count = 0
+ adopted_stream_count = 0
- # Look for the request made to data saver client config server.
- with open('chrome.netlog.json') as data_file:
- data = json.load(data_file)
+ # Look for the request made to data saver client config server.
+ data = t.StopAndGetNetLog()
- mapped_const = data["constants"]["logEventTypes"]\
- ["HTTP2_STREAM_ADOPTED_PUSH_STREAM"]
- self.assertLess(0, mapped_const)
+ mapped_const = data["constants"]["logEventTypes"]\
+ ["HTTP2_STREAM_ADOPTED_PUSH_STREAM"]
+ self.assertLess(0, mapped_const)
- for i in data["events"]:
- dumped_event = json.dumps(i)
- if dumped_event.find("chrome-proxy") != -1 and\
- dumped_event.find("check.googlezip.net/test.html") != -1 and\
- dumped_event.find("promised_stream_id") !=-1:
- promised_stream_count = promised_stream_count + 1
+ for i in data["events"]:
+ dumped_event = json.dumps(i)
+ if dumped_event.find("chrome-proxy") != -1 and\
+ dumped_event.find("check.googlezip.net/test.html") != -1 and\
+ dumped_event.find("promised_stream_id") !=-1:
+ promised_stream_count = promised_stream_count + 1
- if dumped_event.find(str(mapped_const)) != -1 and\
- dumped_event.find("check.googlezip.net/test.html") != -1 and\
- dumped_event.find("stream_id") !=-1:
- adopted_stream_count = adopted_stream_count + 1
+ if dumped_event.find(str(mapped_const)) != -1 and\
+ dumped_event.find("check.googlezip.net/test.html") != -1 and\
+ dumped_event.find("stream_id") !=-1:
+ adopted_stream_count = adopted_stream_count + 1
- # Verify that the stream was pushed and adopted.
- self.assertEqual(1, promised_stream_count)
- self.assertEqual(1, adopted_stream_count)
+ # Verify that the stream was pushed and adopted.
+ self.assertEqual(1, promised_stream_count)
+ self.assertEqual(1, adopted_stream_count)
if __name__ == '__main__':
diff --git a/chromium/tools/chrome_proxy/webdriver/lite_page.py b/chromium/tools/chrome_proxy/webdriver/lite_page.py
index 00b03f68a0b..b76979e3ccf 100644
--- a/chromium/tools/chrome_proxy/webdriver/lite_page.py
+++ b/chromium/tools/chrome_proxy/webdriver/lite_page.py
@@ -322,5 +322,41 @@ class LitePage(IntegrationTest):
self.assertNotIn('chrome-proxy-accept-transform',
response.request_headers)
+ # Checks that the server provides a preview (either Lite Page or fallback
+ # to LoFi) for a "heavy" page over a 3G connection.
+ @ChromeVersionEqualOrAfterM(61)
+ def testPreviewProvidedForHeavyPage(self):
+ with TestDriver() as test_driver:
+ test_driver.AddChromeArg('--enable-spdy-proxy-auth')
+ test_driver.AddChromeArg(
+ '--force-fieldtrial-params=NetworkQualityEstimator.Enabled:'
+ 'force_effective_connection_type/3G,'
+ 'DataReductionProxyServerExperiments.Enabled:'
+ 'exp/integration_test_policy')
+ test_driver.AddChromeArg(
+ '--force-fieldtrials=NetworkQualityEstimator/Enabled/'
+ 'DataReductionProxyServerExperiments/Enabled')
+
+ # Open a URL that is specially marked as a heavy page for integration
+ # test purposes (requires using the "exp=integration_test_policy" value
+ # in chrome-proxy header).
+ test_driver.LoadURL('http://check.googlezip.net/previews/heavy_page.html')
+
+ lite_page_responses = 0
+ page_policies_responses = 0
+ for response in test_driver.GetHTTPResponses():
+ self.assertEqual('3G', response.request_headers['chrome-proxy-ect'])
+ self.assertIn('exp=integration_test_policy',
+ response.request_headers['chrome-proxy'])
+ if response.url.endswith('html'):
+ if self.checkLitePageResponse(response):
+ lite_page_responses = lite_page_responses + 1
+ elif 'chrome-proxy' in response.response_headers:
+ self.assertIn('page-policies',
+ response.response_headers['chrome-proxy'])
+ page_policies_responses = page_policies_responses + 1
+
+ self.assertTrue(lite_page_responses == 1 or page_policies_responses == 1)
+
if __name__ == '__main__':
IntegrationTest.RunAllTests()
diff --git a/chromium/tools/chrome_proxy/webdriver/smoke.py b/chromium/tools/chrome_proxy/webdriver/smoke.py
index 50d55262776..debb885356a 100644
--- a/chromium/tools/chrome_proxy/webdriver/smoke.py
+++ b/chromium/tools/chrome_proxy/webdriver/smoke.py
@@ -73,10 +73,6 @@ class Smoke(IntegrationTest):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.AddChromeArg('--enable-data-reduction-proxy-force-pingback')
- t.AddChromeArg('--log-net-log=chrome.netlog.json')
- # Force set the variations ID, so they are send along with the pingback
- # request.
- t.AddChromeArg('--force-variation-ids=42')
t.LoadURL('http://check.googlezip.net/test.html')
t.LoadURL('http://check.googlezip.net/test.html')
t.SleepUntilHistogramHasEntry("DataReductionProxy.Pingback.Succeeded")
@@ -86,22 +82,35 @@ class Smoke(IntegrationTest):
succeeded = t.GetHistogram('DataReductionProxy.Pingback.Succeeded')
self.assertEqual(1, succeeded['count'])
- # Look for the request made to data saver pingback server.
- with open('chrome.netlog.json') as data_file:
- data = json.load(data_file)
- variation_header_count = 0
- for i in data["events"]:
- dumped_event = json.dumps(i)
- if dumped_event.find("datasaver.googleapis.com") !=-1 and\
- dumped_event.find("recordPageloadMetrics") != -1 and\
- dumped_event.find("headers") != -1 and\
- dumped_event.find("accept-encoding") != -1 and\
- dumped_event.find("x-client-data") !=-1:
- variation_header_count = variation_header_count + 1
+ # Ensure pageload metric pingback with DataSaver has the variations header.
+ @ChromeVersionEqualOrAfterM(62)
+ def testPingbackHasVariations(self):
+ with TestDriver() as t:
+ t.AddChromeArg('--enable-spdy-proxy-auth')
+ t.AddChromeArg('--enable-data-reduction-proxy-force-pingback')
+ t.UseNetLog()
+ # Force set the variations ID, so they are send along with the pingback
+ # request.
+ t.AddChromeArg('--force-variation-ids=42')
+ t.LoadURL('http://check.googlezip.net/test.html')
+ t.LoadURL('http://check.googlezip.net/test.html')
+ t.SleepUntilHistogramHasEntry("DataReductionProxy.Pingback.Succeeded")
+
+ # Look for the request made to data saver pingback server.
+ data = t.StopAndGetNetLog()
+ variation_header_count = 0
+ for i in data["events"]:
+ dumped_event = json.dumps(i)
+ if dumped_event.find("datasaver.googleapis.com") !=-1 and\
+ dumped_event.find("recordPageloadMetrics") != -1 and\
+ dumped_event.find("headers") != -1 and\
+ dumped_event.find("accept-encoding") != -1 and\
+ dumped_event.find("x-client-data") !=-1:
+ variation_header_count = variation_header_count + 1
- # Variation IDs are set. x-client-data should be present in the request
- # headers.
- self.assertLessEqual(1, variation_header_count)
+ # Variation IDs are set. x-client-data should be present in the request
+ # headers.
+ self.assertLessEqual(1, variation_header_count)
# Verify unique page IDs are sent in the Chrome-Proxy header.
@ChromeVersionEqualOrAfterM(59)
diff --git a/chromium/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp b/chromium/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp
index bd0ec385eef..36db12aba53 100644
--- a/chromium/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp
+++ b/chromium/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp
@@ -37,8 +37,6 @@ class BlinkGCPluginAction : public PluginASTAction {
options_.warn_unneeded_finalizer = true;
} else if (arg == "enable-weak-members-in-unmanaged-classes") {
options_.enable_weak_members_in_unmanaged_classes = true;
- } else if (arg == "use-chromium-style-naming") {
- // TODO(dcheng): Remove this once the build no longer passes this flag.
} else {
llvm::errs() << "Unknown blink-gc-plugin argument: " << arg << "\n";
return false;
diff --git a/chromium/tools/clang/plugins/ChromeClassTester.cpp b/chromium/tools/clang/plugins/ChromeClassTester.cpp
index e7c4285110a..b0c9c32e6b6 100644
--- a/chromium/tools/clang/plugins/ChromeClassTester.cpp
+++ b/chromium/tools/clang/plugins/ChromeClassTester.cpp
@@ -48,11 +48,8 @@ void ChromeClassTester::CheckTag(TagDecl* tag) {
// We handle class types here where we have semantic information. We can only
// check structs/classes/enums here, but we get a bunch of nice semantic
// information instead of just parsing information.
- if (InBannedNamespace(tag))
- return;
-
SourceLocation location = tag->getInnerLocStart();
- LocationType location_type = ClassifyLocation(location, tag);
+ LocationType location_type = ClassifyLocation(location);
if (location_type == LocationType::kThirdParty)
return;
@@ -80,28 +77,8 @@ void ChromeClassTester::CheckTag(TagDecl* tag) {
}
}
-void ChromeClassTester::emitWarning(SourceLocation loc,
- const char* raw_error) {
- FullSourceLoc full(loc, instance().getSourceManager());
- std::string err;
- err = "[chromium-style] ";
- err += raw_error;
-
- DiagnosticIDs::Level level = getErrorLevel() == DiagnosticsEngine::Error
- ? DiagnosticIDs::Error : DiagnosticIDs::Warning;
-
- unsigned id = diagnostic().getDiagnosticIDs()->getCustomDiagID(level, err);
- DiagnosticBuilder builder = diagnostic().Report(full, id);
-
-}
-
ChromeClassTester::LocationType ChromeClassTester::ClassifyLocation(
- SourceLocation loc,
- const Decl* record) {
- std::string ns = GetNamespace(record);
- if (ns == "blink" || ns == "WTF")
- return LocationType::kBlink;
-
+ SourceLocation loc) {
if (instance().getSourceManager().isInSystemHeader(loc))
return LocationType::kThirdParty;
@@ -118,10 +95,6 @@ ChromeClassTester::LocationType ChromeClassTester::ClassifyLocation(
if (filename == "<scratch space>")
return LocationType::kThirdParty;
- // Don't complain about autogenerated protobuf files.
- if (ends_with(filename, ".pb.h"))
- return LocationType::kThirdParty;
-
#if defined(LLVM_ON_UNIX)
// Resolve the symlinktastic relative path and make it absolute.
char resolvedPath[MAXPATHLEN];
@@ -162,6 +135,15 @@ ChromeClassTester::LocationType ChromeClassTester::ClassifyLocation(
std::replace(filename.begin(), filename.end(), '\\', '/');
#endif
+ // TODO(dcheng, tkent): The WebKit directory is being renamed to Blink. Clean
+ // this up once the rename is done.
+ if (filename.find("/third_party/WebKit/") != std::string::npos ||
+ (filename.find("/third_party/blink/") != std::string::npos &&
+ // Browser-side code should always use the full range of checks.
+ filename.find("/third_party/blink/browser/") == std::string::npos)) {
+ return LocationType::kBlink;
+ }
+
for (const std::string& banned_dir : banned_directories_) {
// If any of the banned directories occur as a component in filename,
// this file is rejected.
@@ -175,14 +157,6 @@ ChromeClassTester::LocationType ChromeClassTester::ClassifyLocation(
return LocationType::kChrome;
}
-bool ChromeClassTester::InBannedNamespace(const Decl* record) {
- std::string n = GetNamespace(record);
- if (!n.empty())
- return banned_namespaces_.find(n) != banned_namespaces_.end();
-
- return false;
-}
-
std::string ChromeClassTester::GetNamespace(const Decl* record) {
return GetNamespaceImpl(record->getDeclContext(), std::string());
}
@@ -225,9 +199,6 @@ bool ChromeClassTester::InImplementationFile(SourceLocation record_location) {
}
void ChromeClassTester::BuildBannedLists() {
- banned_namespaces_.emplace("std");
- banned_namespaces_.emplace("__gnu_cxx");
-
banned_directories_.emplace("/third_party/");
banned_directories_.emplace("/native_client/");
banned_directories_.emplace("/breakpad/");
@@ -235,7 +206,6 @@ void ChromeClassTester::BuildBannedLists() {
banned_directories_.emplace("/ppapi/");
banned_directories_.emplace("/testing/");
banned_directories_.emplace("/v8/");
- banned_directories_.emplace("/sdch/");
banned_directories_.emplace("/frameworks/");
// Don't check autogenerated headers.
diff --git a/chromium/tools/clang/plugins/ChromeClassTester.h b/chromium/tools/clang/plugins/ChromeClassTester.h
index 3091000fbac..7051dbc6ee4 100644
--- a/chromium/tools/clang/plugins/ChromeClassTester.h
+++ b/chromium/tools/clang/plugins/ChromeClassTester.h
@@ -30,15 +30,6 @@ class ChromeClassTester {
clang::CompilerInstance& instance() { return instance_; }
clang::DiagnosticsEngine& diagnostic() { return diagnostic_; }
- // Emits a simple warning; this shouldn't be used if you require printf-style
- // printing.
- // TODO(dcheng): This will be removed. Do not add new usage.
- void emitWarning(clang::SourceLocation loc, const char* error);
-
- // Utility method for subclasses to check if this class is in a banned
- // namespace.
- bool InBannedNamespace(const clang::Decl* record);
-
// Utility method for subclasses to check how a certain SourceLocation should
// be handled. The main criteria for classification is the SourceLocation's
// path (e.g. whether it's in //third_party).
@@ -52,8 +43,7 @@ class ChromeClassTester {
// it doesn't make sense to enforce Chrome's custom diagnostics.
kThirdParty,
};
- LocationType ClassifyLocation(clang::SourceLocation loc,
- const clang::Decl* record);
+ LocationType ClassifyLocation(clang::SourceLocation loc);
// Utility method for subclasses to determine the namespace of the
// specified record, if any. Unnamed namespaces will be identified as
@@ -99,9 +89,6 @@ class ChromeClassTester {
clang::CompilerInstance& instance_;
clang::DiagnosticsEngine& diagnostic_;
- // List of banned namespaces.
- std::set<std::string> banned_namespaces_;
-
// List of banned directories.
std::set<std::string> banned_directories_;
diff --git a/chromium/tools/clang/plugins/FindBadConstructsConsumer.cpp b/chromium/tools/clang/plugins/FindBadConstructsConsumer.cpp
index 633e2ed58ca..215e69b5f0f 100644
--- a/chromium/tools/clang/plugins/FindBadConstructsConsumer.cpp
+++ b/chromium/tools/clang/plugins/FindBadConstructsConsumer.cpp
@@ -16,42 +16,6 @@ namespace chrome_checker {
namespace {
-const char kMethodRequiresOverride[] =
- "[chromium-style] Overriding method must be marked with 'override' or "
- "'final'.";
-const char kRedundantVirtualSpecifier[] =
- "[chromium-style] %0 is redundant; %1 implies %0.";
-// http://llvm.org/bugs/show_bug.cgi?id=21051 has been filed to make this a
-// Clang warning.
-const char kBaseMethodVirtualAndFinal[] =
- "[chromium-style] The virtual method does not override anything and is "
- "final; consider making it non-virtual.";
-const char kNoExplicitDtor[] =
- "[chromium-style] Classes that are ref-counted should have explicit "
- "destructors that are declared protected or private.";
-const char kPublicDtor[] =
- "[chromium-style] Classes that are ref-counted should have "
- "destructors that are declared protected or private.";
-const char kProtectedNonVirtualDtor[] =
- "[chromium-style] Classes that are ref-counted and have non-private "
- "destructors should declare their destructor virtual.";
-const char kWeakPtrFactoryOrder[] =
- "[chromium-style] WeakPtrFactory members which refer to their outer class "
- "must be the last member in the outer class definition.";
-const char kBadLastEnumValue[] =
- "[chromium-style] _LAST/Last constants of enum types must have the maximal "
- "value for any constant of that type.";
-const char kAutoDeducedToAPointerType[] =
- "[chromium-style] auto variable type must not deduce to a raw pointer "
- "type.";
-const char kNoteInheritance[] = "[chromium-style] %0 inherits from %1 here";
-const char kNoteImplicitDtor[] =
- "[chromium-style] No explicit destructor for %0 defined";
-const char kNotePublicDtor[] =
- "[chromium-style] Public destructor declared here";
-const char kNoteProtectedNonVirtualDtor[] =
- "[chromium-style] Protected non-virtual destructor declared here";
-
// Returns the underlying Type for |type| by expanding typedefs and removing
// any namespace qualifiers. This is similar to desugaring, except that for
// ElaboratedTypes, desugar will unwrap too much.
@@ -143,39 +107,89 @@ FindBadConstructsConsumer::FindBadConstructsConsumer(CompilerInstance& instance,
ipc_visitor_.reset(new CheckIPCVisitor(instance));
}
- // Messages for virtual method specifiers.
- diag_method_requires_override_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kMethodRequiresOverride);
- diag_redundant_virtual_specifier_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kRedundantVirtualSpecifier);
- diag_base_method_virtual_and_final_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kBaseMethodVirtualAndFinal);
+ // Messages for virtual methods.
+ diag_method_requires_override_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Overriding method must be marked with 'override' or "
+ "'final'.");
+ diag_redundant_virtual_specifier_ = diagnostic().getCustomDiagID(
+ getErrorLevel(), "[chromium-style] %0 is redundant; %1 implies %0.");
+ // http://llvm.org/bugs/show_bug.cgi?id=21051 has been filed to make this a
+ // Clang warning.
+ diag_base_method_virtual_and_final_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] The virtual method does not override anything and is "
+ "final; consider making it non-virtual.");
+ diag_virtual_with_inline_body_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] virtual methods with non-empty bodies shouldn't be "
+ "declared inline.");
+
+ // Messages for constructors.
+ diag_no_explicit_ctor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Complex class/struct needs an explicit out-of-line "
+ "constructor.");
+ diag_no_explicit_copy_ctor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Complex class/struct needs an explicit out-of-line "
+ "copy constructor.");
+ diag_inline_complex_ctor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Complex constructor has an inlined body.");
// Messages for destructors.
- diag_no_explicit_dtor_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kNoExplicitDtor);
- diag_public_dtor_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kPublicDtor);
- diag_protected_non_virtual_dtor_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kProtectedNonVirtualDtor);
+ diag_no_explicit_dtor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Complex class/struct needs an explicit out-of-line "
+ "destructor.");
+ diag_inline_complex_dtor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Complex destructor has an inline body.");
+
+ // Messages for refcounted objects.
+ diag_refcounted_needs_explicit_dtor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Classes that are ref-counted should have explicit "
+ "destructors that are declared protected or private.");
+ diag_refcounted_with_public_dtor_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Classes that are ref-counted should have "
+ "destructors that are declared protected or private.");
+ diag_refcounted_with_protected_non_virtual_dtor_ =
+ diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] Classes that are ref-counted and have non-private "
+ "destructors should declare their destructor virtual.");
// Miscellaneous messages.
- diag_weak_ptr_factory_order_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kWeakPtrFactoryOrder);
+ diag_weak_ptr_factory_order_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] WeakPtrFactory members which refer to their outer "
+ "class "
+ "must be the last member in the outer class definition.");
diag_bad_enum_last_value_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kBadLastEnumValue);
- diag_auto_deduced_to_a_pointer_type_ =
- diagnostic().getCustomDiagID(getErrorLevel(), kAutoDeducedToAPointerType);
+ diagnostic().getCustomDiagID(getErrorLevel(),
+ "[chromium-style] _LAST/Last constants of "
+ "enum types must have the maximal "
+ "value for any constant of that type.");
+ diag_auto_deduced_to_a_pointer_type_ = diagnostic().getCustomDiagID(
+ getErrorLevel(),
+ "[chromium-style] auto variable type must not deduce to a raw pointer "
+ "type.");
// Registers notes to make it easier to interpret warnings.
- diag_note_inheritance_ =
- diagnostic().getCustomDiagID(DiagnosticsEngine::Note, kNoteInheritance);
- diag_note_implicit_dtor_ =
- diagnostic().getCustomDiagID(DiagnosticsEngine::Note, kNoteImplicitDtor);
- diag_note_public_dtor_ =
- diagnostic().getCustomDiagID(DiagnosticsEngine::Note, kNotePublicDtor);
+ diag_note_inheritance_ = diagnostic().getCustomDiagID(
+ DiagnosticsEngine::Note, "[chromium-style] %0 inherits from %1 here");
+ diag_note_implicit_dtor_ = diagnostic().getCustomDiagID(
+ DiagnosticsEngine::Note,
+ "[chromium-style] No explicit destructor for %0 defined");
+ diag_note_public_dtor_ = diagnostic().getCustomDiagID(
+ DiagnosticsEngine::Note,
+ "[chromium-style] Public destructor declared here");
diag_note_protected_non_virtual_dtor_ = diagnostic().getCustomDiagID(
- DiagnosticsEngine::Note, kNoteProtectedNonVirtualDtor);
+ DiagnosticsEngine::Note,
+ "[chromium-style] Protected non-virtual destructor declared here");
}
void FindBadConstructsConsumer::Traverse(ASTContext& context) {
@@ -219,8 +233,10 @@ bool FindBadConstructsConsumer::VisitVarDecl(clang::VarDecl* var_decl) {
void FindBadConstructsConsumer::CheckChromeClass(LocationType location_type,
SourceLocation record_location,
CXXRecordDecl* record) {
- // TODO(dcheng): After emitWarning() is removed, move warning filtering into
- // ReportIfSpellingLocNotIgnored.
+ // TODO(dcheng): This is needed because some of the diagnostics for refcounted
+ // classes use DiagnosticsEngine::Report() directly, and there are existing
+ // violations in Blink. This should be removed once the checks are
+ // modularized.
if (location_type == LocationType::kBlink)
return;
@@ -289,7 +305,8 @@ void FindBadConstructsConsumer::CheckChromeEnum(LocationType location_type,
if (((name.size() > 4 && name.compare(name.size() - 4, 4, "Last") == 0) ||
(name.size() > 5 && name.compare(name.size() - 5, 5, "_LAST") == 0)) &&
iter->getInitVal() < max_so_far) {
- diagnostic().Report(iter->getLocation(), diag_bad_enum_last_value_);
+ ReportIfSpellingLocNotIgnored(iter->getLocation(),
+ diag_bad_enum_last_value_);
}
}
}
@@ -360,9 +377,7 @@ void FindBadConstructsConsumer::CheckCtorDtorWeight(
if (ctor_score >= 10) {
if (!record->hasUserDeclaredConstructor()) {
- emitWarning(record_location,
- "Complex class/struct needs an explicit out-of-line "
- "constructor.");
+ ReportIfSpellingLocNotIgnored(record_location, diag_no_explicit_ctor_);
} else {
// Iterate across all the constructors in this file and yell if we
// find one that tries to be inline.
@@ -386,9 +401,8 @@ void FindBadConstructsConsumer::CheckCtorDtorWeight(
// be emitted on other platforms too, reevaluate if we want to keep
// surpressing this then http://crbug.com/467288
if (!record->hasAttr<DLLExportAttr>())
- emitWarning(record_location,
- "Complex class/struct needs an explicit out-of-line "
- "copy constructor.");
+ ReportIfSpellingLocNotIgnored(record_location,
+ diag_no_explicit_copy_ctor_);
} else {
// See the comment in the previous branch about copy constructors.
// This does the same for implicit move constructors.
@@ -397,8 +411,8 @@ void FindBadConstructsConsumer::CheckCtorDtorWeight(
!record->hasUserDeclaredMoveConstructor() &&
record->hasAttr<DLLExportAttr>();
if (!is_likely_compiler_generated_dllexport_move_ctor)
- emitWarning(it->getInnerLocStart(),
- "Complex constructor has an inlined body.");
+ ReportIfSpellingLocNotIgnored(it->getInnerLocStart(),
+ diag_inline_complex_ctor_);
}
} else if (it->isInlined() && !it->isInlineSpecified() &&
!it->isDeleted() && (!it->isCopyOrMoveConstructor() ||
@@ -408,8 +422,8 @@ void FindBadConstructsConsumer::CheckCtorDtorWeight(
// constructors in the previously mentioned situation. To preserve
// compatibility with existing Chromium code, only warn if it's an
// explicitly defaulted copy or move constructor.
- emitWarning(it->getInnerLocStart(),
- "Complex constructor has an inlined body.");
+ ReportIfSpellingLocNotIgnored(it->getInnerLocStart(),
+ diag_inline_complex_ctor_);
}
}
}
@@ -419,14 +433,12 @@ void FindBadConstructsConsumer::CheckCtorDtorWeight(
// trivial members; 20 ints don't need a destructor.
if (dtor_score >= 10 && !record->hasTrivialDestructor()) {
if (!record->hasUserDeclaredDestructor()) {
- emitWarning(record_location,
- "Complex class/struct needs an explicit out-of-line "
- "destructor.");
+ ReportIfSpellingLocNotIgnored(record_location, diag_no_explicit_dtor_);
} else if (CXXDestructorDecl* dtor = record->getDestructor()) {
if (dtor->isInlined() && !dtor->isInlineSpecified() &&
!dtor->isDeleted()) {
- emitWarning(dtor->getInnerLocStart(),
- "Complex destructor has an inline body.");
+ ReportIfSpellingLocNotIgnored(dtor->getInnerLocStart(),
+ diag_inline_complex_dtor_);
}
}
}
@@ -436,15 +448,13 @@ bool FindBadConstructsConsumer::InTestingNamespace(const Decl* record) {
return GetNamespace(record).find("testing") != std::string::npos;
}
-bool FindBadConstructsConsumer::IsMethodInBannedOrTestingNamespace(
+bool FindBadConstructsConsumer::IsMethodInTestingNamespace(
const CXXMethodDecl* method) {
- if (InBannedNamespace(method))
- return true;
for (CXXMethodDecl::method_iterator i = method->begin_overridden_methods();
i != method->end_overridden_methods();
++i) {
const CXXMethodDecl* overridden = *i;
- if (IsMethodInBannedOrTestingNamespace(overridden) ||
+ if (IsMethodInTestingNamespace(overridden) ||
// Provide an exception for ::testing::Test. gtest itself uses some
// magic to try to make sure SetUp()/TearDown() aren't capitalized
// incorrectly, but having the plugin enforce override is also nice.
@@ -460,10 +470,9 @@ bool FindBadConstructsConsumer::IsMethodInBannedOrTestingNamespace(
SuppressibleDiagnosticBuilder
FindBadConstructsConsumer::ReportIfSpellingLocNotIgnored(
SourceLocation loc,
- const Decl* record,
unsigned diagnostic_id) {
- LocationType type = ClassifyLocation(
- instance().getSourceManager().getSpellingLoc(loc), record);
+ LocationType type =
+ ClassifyLocation(instance().getSourceManager().getSpellingLoc(loc));
bool ignored =
type == LocationType::kThirdParty || type == LocationType::kBlink;
return SuppressibleDiagnosticBuilder(&diagnostic(), loc, diagnostic_id,
@@ -523,7 +532,7 @@ void FindBadConstructsConsumer::CheckVirtualSpecifiers(
OverrideAttr* override_attr = method->getAttr<OverrideAttr>();
FinalAttr* final_attr = method->getAttr<FinalAttr>();
- if (IsMethodInBannedOrTestingNamespace(method))
+ if (IsMethodInTestingNamespace(method))
return;
SourceManager& manager = instance().getSourceManager();
@@ -535,7 +544,7 @@ void FindBadConstructsConsumer::CheckVirtualSpecifiers(
// Note this is just an educated guess: the assumption here is that any
// macro for declaring methods will probably be at the start of the method's
// source range.
- ReportIfSpellingLocNotIgnored(method->getLocStart(), method,
+ ReportIfSpellingLocNotIgnored(method->getLocStart(),
diag_redundant_virtual_specifier_)
<< "'virtual'"
<< (override_attr ? static_cast<Attr*>(override_attr) : final_attr)
@@ -586,23 +595,23 @@ void FindBadConstructsConsumer::CheckVirtualSpecifiers(
// Again, only emit the warning if it doesn't originate from a macro in
// a system header.
if (loc.isValid()) {
- ReportIfSpellingLocNotIgnored(loc, method, diag_method_requires_override_)
+ ReportIfSpellingLocNotIgnored(loc, diag_method_requires_override_)
<< FixItHint::CreateInsertion(loc, " override");
} else {
- ReportIfSpellingLocNotIgnored(range.getBegin(), method,
+ ReportIfSpellingLocNotIgnored(range.getBegin(),
diag_method_requires_override_);
}
}
if (final_attr && override_attr) {
- ReportIfSpellingLocNotIgnored(override_attr->getLocation(), method,
+ ReportIfSpellingLocNotIgnored(override_attr->getLocation(),
diag_redundant_virtual_specifier_)
<< override_attr << final_attr
<< FixItHint::CreateRemoval(override_attr->getRange());
}
if (final_attr && !is_override) {
- ReportIfSpellingLocNotIgnored(method->getLocStart(), method,
+ ReportIfSpellingLocNotIgnored(method->getLocStart(),
diag_base_method_virtual_and_final_)
<< FixItRemovalForVirtual(manager, lang_opts, method)
<< FixItHint::CreateRemoval(final_attr->getRange());
@@ -623,8 +632,7 @@ void FindBadConstructsConsumer::CheckVirtualBodies(
bool emit = true;
if (loc.isMacroID()) {
SourceManager& manager = instance().getSourceManager();
- LocationType type =
- ClassifyLocation(manager.getSpellingLoc(loc), method);
+ LocationType type = ClassifyLocation(manager.getSpellingLoc(loc));
if (type == LocationType::kThirdParty || type == LocationType::kBlink)
emit = false;
else {
@@ -636,9 +644,7 @@ void FindBadConstructsConsumer::CheckVirtualBodies(
}
}
if (emit)
- emitWarning(loc,
- "virtual methods with non-empty bodies shouldn't be "
- "declared inline.");
+ ReportIfSpellingLocNotIgnored(loc, diag_virtual_with_inline_body_);
}
}
}
@@ -804,9 +810,9 @@ void FindBadConstructsConsumer::PrintInheritanceChain(const CXXBasePath& path) {
unsigned FindBadConstructsConsumer::DiagnosticForIssue(RefcountIssue issue) {
switch (issue) {
case ImplicitDestructor:
- return diag_no_explicit_dtor_;
+ return diag_refcounted_needs_explicit_dtor_;
case PublicDestructor:
- return diag_public_dtor_;
+ return diag_refcounted_with_public_dtor_;
case None:
assert(false && "Do not call DiagnosticForIssue with issue None");
return 0;
@@ -853,7 +859,8 @@ void FindBadConstructsConsumer::CheckRefCountedDtors(
refcounted_path.begin()->back().Class->getDestructor()) {
if (dtor->getAccess() == AS_protected && !dtor->isVirtual()) {
loc = dtor->getInnerLocStart();
- diagnostic().Report(loc, diag_protected_non_virtual_dtor_);
+ ReportIfSpellingLocNotIgnored(
+ loc, diag_refcounted_with_protected_non_virtual_dtor_);
return;
}
}
@@ -907,12 +914,13 @@ void FindBadConstructsConsumer::CheckRefCountedDtors(
issue = CheckRecordForRefcountIssue(problem_record, loc);
if (issue == ImplicitDestructor) {
- diagnostic().Report(record_location, diag_no_explicit_dtor_);
+ diagnostic().Report(record_location,
+ diag_refcounted_needs_explicit_dtor_);
PrintInheritanceChain(refcounted_path.front());
diagnostic().Report(loc, diag_note_implicit_dtor_) << problem_record;
PrintInheritanceChain(*it);
} else if (issue == PublicDestructor) {
- diagnostic().Report(record_location, diag_public_dtor_);
+ diagnostic().Report(record_location, diag_refcounted_with_public_dtor_);
PrintInheritanceChain(refcounted_path.front());
diagnostic().Report(loc, diag_note_public_dtor_);
PrintInheritanceChain(*it);
@@ -969,8 +977,8 @@ void FindBadConstructsConsumer::CheckWeakPtrFactoryMembers(
// one of those, it means there is at least one member after a factory.
if (weak_ptr_factory_location.isValid() &&
!param_is_weak_ptr_factory_to_self) {
- diagnostic().Report(weak_ptr_factory_location,
- diag_weak_ptr_factory_order_);
+ ReportIfSpellingLocNotIgnored(weak_ptr_factory_location,
+ diag_weak_ptr_factory_order_);
}
}
}
@@ -1015,9 +1023,8 @@ void FindBadConstructsConsumer::CheckVarDecl(clang::VarDecl* var_decl) {
// should be fewer auto types than banned namespace/directory types,
// so check this last.
LocationType location_type =
- ClassifyLocation(var_decl->getLocStart(), var_decl);
- if (!InBannedNamespace(var_decl) &&
- location_type != LocationType::kThirdParty) {
+ ClassifyLocation(var_decl->getLocStart());
+ if (location_type != LocationType::kThirdParty) {
// The range starts from |var_decl|'s loc start, which is the
// beginning of the full expression defining this |var_decl|. It
// ends, however, where this |var_decl|'s type loc ends, since
@@ -1028,7 +1035,7 @@ void FindBadConstructsConsumer::CheckVarDecl(clang::VarDecl* var_decl) {
clang::SourceRange range(
var_decl->getLocStart(),
var_decl->getTypeSourceInfo()->getTypeLoc().getLocEnd());
- ReportIfSpellingLocNotIgnored(range.getBegin(), var_decl,
+ ReportIfSpellingLocNotIgnored(range.getBegin(),
diag_auto_deduced_to_a_pointer_type_)
<< FixItHint::CreateReplacement(
range,
diff --git a/chromium/tools/clang/plugins/FindBadConstructsConsumer.h b/chromium/tools/clang/plugins/FindBadConstructsConsumer.h
index f2eeb20b25a..6e7930a4471 100644
--- a/chromium/tools/clang/plugins/FindBadConstructsConsumer.h
+++ b/chromium/tools/clang/plugins/FindBadConstructsConsumer.h
@@ -71,7 +71,7 @@ class FindBadConstructsConsumer
clang::CXXRecordDecl* record);
bool InTestingNamespace(const clang::Decl* record);
- bool IsMethodInBannedOrTestingNamespace(const clang::CXXMethodDecl* method);
+ bool IsMethodInTestingNamespace(const clang::CXXMethodDecl* method);
// Returns a diagnostic builder that only emits the diagnostic if the spelling
// location (the actual characters that make up the token) is not in an
@@ -80,7 +80,6 @@ class FindBadConstructsConsumer
// generally can't be easily updated.
SuppressibleDiagnosticBuilder ReportIfSpellingLocNotIgnored(
clang::SourceLocation loc,
- const clang::Decl* record,
unsigned diagnostic_id);
void CheckVirtualMethods(clang::SourceLocation record_location,
@@ -116,9 +115,15 @@ class FindBadConstructsConsumer
unsigned diag_method_requires_override_;
unsigned diag_redundant_virtual_specifier_;
unsigned diag_base_method_virtual_and_final_;
+ unsigned diag_virtual_with_inline_body_;
+ unsigned diag_no_explicit_ctor_;
+ unsigned diag_no_explicit_copy_ctor_;
+ unsigned diag_inline_complex_ctor_;
unsigned diag_no_explicit_dtor_;
- unsigned diag_public_dtor_;
- unsigned diag_protected_non_virtual_dtor_;
+ unsigned diag_inline_complex_dtor_;
+ unsigned diag_refcounted_needs_explicit_dtor_;
+ unsigned diag_refcounted_with_public_dtor_;
+ unsigned diag_refcounted_with_protected_non_virtual_dtor_;
unsigned diag_weak_ptr_factory_order_;
unsigned diag_bad_enum_last_value_;
unsigned diag_auto_deduced_to_a_pointer_type_;
diff --git a/chromium/tools/clang/scripts/InstructionCombining.cpp b/chromium/tools/clang/scripts/InstructionCombining.cpp
deleted file mode 100644
index a1cdd426220..00000000000
--- a/chromium/tools/clang/scripts/InstructionCombining.cpp
+++ /dev/null
@@ -1,3306 +0,0 @@
-//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// InstructionCombining - Combine instructions to form fewer, simple
-// instructions. This pass does not modify the CFG. This pass is where
-// algebraic simplification happens.
-//
-// This pass combines things like:
-// %Y = add i32 %X, 1
-// %Z = add i32 %Y, 1
-// into:
-// %Z = add i32 %X, 2
-//
-// This is a simple worklist driven algorithm.
-//
-// This pass guarantees that the following canonicalizations are performed on
-// the program:
-// 1. If a binary operator has a constant operand, it is moved to the RHS
-// 2. Bitwise operators with constant operands are always grouped so that
-// shifts are performed first, then or's, then and's, then xor's.
-// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
-// 4. All cmp instructions on boolean values are replaced with logical ops
-// 5. add X, X is represented as (X*2) => (X << 1)
-// 6. Multiplies with a power-of-two constant argument are transformed into
-// shifts.
-// ... etc.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombineInternal.h"
-#include "llvm-c/Initialization.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/AssumptionCache.h"
-#include "llvm/Analysis/BasicAliasAnalysis.h"
-#include "llvm/Analysis/CFG.h"
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/EHPersonalities.h"
-#include "llvm/Analysis/GlobalsModRef.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/CFG.h"
-#include "llvm/IR/DIBuilder.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/GetElementPtrTypeIterator.h"
-#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/PatternMatch.h"
-#include "llvm/IR/ValueHandle.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/DebugCounter.h"
-#include "llvm/Support/KnownBits.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/InstCombine/InstCombine.h"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include <algorithm>
-#include <climits>
-using namespace llvm;
-using namespace llvm::PatternMatch;
-
-#define DEBUG_TYPE "instcombine"
-
-STATISTIC(NumCombined , "Number of insts combined");
-STATISTIC(NumConstProp, "Number of constant folds");
-STATISTIC(NumDeadInst , "Number of dead inst eliminated");
-STATISTIC(NumSunkInst , "Number of instructions sunk");
-STATISTIC(NumExpand, "Number of expansions");
-STATISTIC(NumFactor , "Number of factorizations");
-STATISTIC(NumReassoc , "Number of reassociations");
-DEBUG_COUNTER(VisitCounter, "instcombine-visit",
- "Controls which instructions are visited");
-
-static cl::opt<bool>
-EnableExpensiveCombines("expensive-combines",
- cl::desc("Enable expensive instruction combines"));
-
-static cl::opt<unsigned>
-MaxArraySize("instcombine-maxarray-size", cl::init(1024),
- cl::desc("Maximum array size considered when doing a combine"));
-
-Value *InstCombiner::EmitGEPOffset(User *GEP) {
- return llvm::EmitGEPOffset(&Builder, DL, GEP);
-}
-
-/// Return true if it is desirable to convert an integer computation from a
-/// given bit width to a new bit width.
-/// We don't want to convert from a legal to an illegal type or from a smaller
-/// to a larger illegal type. A width of '1' is always treated as a legal type
-/// because i1 is a fundamental type in IR, and there are many specialized
-/// optimizations for i1 types.
-bool InstCombiner::shouldChangeType(unsigned FromWidth,
- unsigned ToWidth) const {
- bool FromLegal = FromWidth == 1 || DL.isLegalInteger(FromWidth);
- bool ToLegal = ToWidth == 1 || DL.isLegalInteger(ToWidth);
-
- // If this is a legal integer from type, and the result would be an illegal
- // type, don't do the transformation.
- if (FromLegal && !ToLegal)
- return false;
-
- // Otherwise, if both are illegal, do not increase the size of the result. We
- // do allow things like i160 -> i64, but not i64 -> i160.
- if (!FromLegal && !ToLegal && ToWidth > FromWidth)
- return false;
-
- return true;
-}
-
-/// Return true if it is desirable to convert a computation from 'From' to 'To'.
-/// We don't want to convert from a legal to an illegal type or from a smaller
-/// to a larger illegal type. i1 is always treated as a legal type because it is
-/// a fundamental type in IR, and there are many specialized optimizations for
-/// i1 types.
-bool InstCombiner::shouldChangeType(Type *From, Type *To) const {
- assert(From->isIntegerTy() && To->isIntegerTy());
-
- unsigned FromWidth = From->getPrimitiveSizeInBits();
- unsigned ToWidth = To->getPrimitiveSizeInBits();
- return shouldChangeType(FromWidth, ToWidth);
-}
-
-// Return true, if No Signed Wrap should be maintained for I.
-// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
-// where both B and C should be ConstantInts, results in a constant that does
-// not overflow. This function only handles the Add and Sub opcodes. For
-// all other opcodes, the function conservatively returns false.
-static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
- OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
- if (!OBO || !OBO->hasNoSignedWrap())
- return false;
-
- // We reason about Add and Sub Only.
- Instruction::BinaryOps Opcode = I.getOpcode();
- if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
- return false;
-
- const APInt *BVal, *CVal;
- if (!match(B, m_APInt(BVal)) || !match(C, m_APInt(CVal)))
- return false;
-
- bool Overflow = false;
- if (Opcode == Instruction::Add)
- (void)BVal->sadd_ov(*CVal, Overflow);
- else
- (void)BVal->ssub_ov(*CVal, Overflow);
-
- return !Overflow;
-}
-
-/// Conservatively clears subclassOptionalData after a reassociation or
-/// commutation. We preserve fast-math flags when applicable as they can be
-/// preserved.
-static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {
- FPMathOperator *FPMO = dyn_cast<FPMathOperator>(&I);
- if (!FPMO) {
- I.clearSubclassOptionalData();
- return;
- }
-
- FastMathFlags FMF = I.getFastMathFlags();
- I.clearSubclassOptionalData();
- I.setFastMathFlags(FMF);
-}
-
-/// Combine constant operands of associative operations either before or after a
-/// cast to eliminate one of the associative operations:
-/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
-/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
-static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1) {
- auto *Cast = dyn_cast<CastInst>(BinOp1->getOperand(0));
- if (!Cast || !Cast->hasOneUse())
- return false;
-
- // TODO: Enhance logic for other casts and remove this check.
- auto CastOpcode = Cast->getOpcode();
- if (CastOpcode != Instruction::ZExt)
- return false;
-
- // TODO: Enhance logic for other BinOps and remove this check.
- if (!BinOp1->isBitwiseLogicOp())
- return false;
-
- auto AssocOpcode = BinOp1->getOpcode();
- auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
- if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
- return false;
-
- Constant *C1, *C2;
- if (!match(BinOp1->getOperand(1), m_Constant(C1)) ||
- !match(BinOp2->getOperand(1), m_Constant(C2)))
- return false;
-
- // TODO: This assumes a zext cast.
- // Eg, if it was a trunc, we'd cast C1 to the source type because casting C2
- // to the destination type might lose bits.
-
- // Fold the constants together in the destination type:
- // (op (cast (op X, C2)), C1) --> (op (cast X), FoldedC)
- Type *DestTy = C1->getType();
- Constant *CastC2 = ConstantExpr::getCast(CastOpcode, C2, DestTy);
- Constant *FoldedC = ConstantExpr::get(AssocOpcode, C1, CastC2);
- Cast->setOperand(0, BinOp2->getOperand(0));
- BinOp1->setOperand(1, FoldedC);
- return true;
-}
-
-/// This performs a few simplifications for operators that are associative or
-/// commutative:
-///
-/// Commutative operators:
-///
-/// 1. Order operands such that they are listed from right (least complex) to
-/// left (most complex). This puts constants before unary operators before
-/// binary operators.
-///
-/// Associative operators:
-///
-/// 2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
-/// 3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
-///
-/// Associative and commutative operators:
-///
-/// 4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
-/// 5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
-/// 6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
-/// if C1 and C2 are constants.
-bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
- Instruction::BinaryOps Opcode = I.getOpcode();
- bool Changed = false;
-
- do {
- // Order operands such that they are listed from right (least complex) to
- // left (most complex). This puts constants before unary operators before
- // binary operators.
- if (I.isCommutative() && getComplexity(I.getOperand(0)) <
- getComplexity(I.getOperand(1)))
- Changed = !I.swapOperands();
-
- BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
- BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));
-
- if (I.isAssociative()) {
- // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
- if (Op0 && Op0->getOpcode() == Opcode) {
- Value *A = Op0->getOperand(0);
- Value *B = Op0->getOperand(1);
- Value *C = I.getOperand(1);
-
- // Does "B op C" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, C, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "A op V".
- I.setOperand(0, A);
- I.setOperand(1, V);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- if (MaintainNoSignedWrap(I, B, C) &&
- (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
- // Note: this is only valid because SimplifyBinOp doesn't look at
- // the operands to Op0.
- I.clearSubclassOptionalData();
- I.setHasNoSignedWrap(true);
- } else {
- ClearSubclassDataAfterReassociation(I);
- }
-
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
-
- // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
- if (Op1 && Op1->getOpcode() == Opcode) {
- Value *A = I.getOperand(0);
- Value *B = Op1->getOperand(0);
- Value *C = Op1->getOperand(1);
-
- // Does "A op B" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, B, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "V op C".
- I.setOperand(0, V);
- I.setOperand(1, C);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
- }
-
- if (I.isAssociative() && I.isCommutative()) {
- if (simplifyAssocCastAssoc(&I)) {
- Changed = true;
- ++NumReassoc;
- continue;
- }
-
- // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
- if (Op0 && Op0->getOpcode() == Opcode) {
- Value *A = Op0->getOperand(0);
- Value *B = Op0->getOperand(1);
- Value *C = I.getOperand(1);
-
- // Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "V op B".
- I.setOperand(0, V);
- I.setOperand(1, B);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
-
- // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
- if (Op1 && Op1->getOpcode() == Opcode) {
- Value *A = I.getOperand(0);
- Value *B = Op1->getOperand(0);
- Value *C = Op1->getOperand(1);
-
- // Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, SQ.getWithInstruction(&I))) {
- // It simplifies to V. Form "B op V".
- I.setOperand(0, B);
- I.setOperand(1, V);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
- Changed = true;
- ++NumReassoc;
- continue;
- }
- }
-
- // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
- // if C1 and C2 are constants.
- if (Op0 && Op1 &&
- Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
- isa<Constant>(Op0->getOperand(1)) &&
- isa<Constant>(Op1->getOperand(1)) &&
- Op0->hasOneUse() && Op1->hasOneUse()) {
- Value *A = Op0->getOperand(0);
- Constant *C1 = cast<Constant>(Op0->getOperand(1));
- Value *B = Op1->getOperand(0);
- Constant *C2 = cast<Constant>(Op1->getOperand(1));
-
- Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
- BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
- if (isa<FPMathOperator>(New)) {
- FastMathFlags Flags = I.getFastMathFlags();
- Flags &= Op0->getFastMathFlags();
- Flags &= Op1->getFastMathFlags();
- New->setFastMathFlags(Flags);
- }
- InsertNewInstWith(New, I);
- New->takeName(Op1);
- I.setOperand(0, New);
- I.setOperand(1, Folded);
- // Conservatively clear the optional flags, since they may not be
- // preserved by the reassociation.
- ClearSubclassDataAfterReassociation(I);
-
- Changed = true;
- continue;
- }
- }
-
- // No further simplifications.
- return Changed;
- } while (1);
-}
-
-/// Return whether "X LOp (Y ROp Z)" is always equal to
-/// "(X LOp Y) ROp (X LOp Z)".
-static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
- Instruction::BinaryOps ROp) {
- switch (LOp) {
- default:
- return false;
-
- case Instruction::And:
- // And distributes over Or and Xor.
- switch (ROp) {
- default:
- return false;
- case Instruction::Or:
- case Instruction::Xor:
- return true;
- }
-
- case Instruction::Mul:
- // Multiplication distributes over addition and subtraction.
- switch (ROp) {
- default:
- return false;
- case Instruction::Add:
- case Instruction::Sub:
- return true;
- }
-
- case Instruction::Or:
- // Or distributes over And.
- switch (ROp) {
- default:
- return false;
- case Instruction::And:
- return true;
- }
- }
-}
-
-/// Return whether "(X LOp Y) ROp Z" is always equal to
-/// "(X ROp Z) LOp (Y ROp Z)".
-static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
- Instruction::BinaryOps ROp) {
- if (Instruction::isCommutative(ROp))
- return LeftDistributesOverRight(ROp, LOp);
-
- switch (LOp) {
- default:
- return false;
- // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
- // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
- // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- switch (ROp) {
- default:
- return false;
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- return true;
- }
- }
- // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
- // but this requires knowing that the addition does not overflow and other
- // such subtleties.
- return false;
-}
-
-/// This function returns identity value for given opcode, which can be used to
-/// factor patterns like (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1).
-static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {
- if (isa<Constant>(V))
- return nullptr;
-
- return ConstantExpr::getBinOpIdentity(Opcode, V->getType());
-}
-
-/// This function factors binary ops which can be combined using distributive
-/// laws. This function tries to transform 'Op' based TopLevelOpcode to enable
-/// factorization e.g for ADD(SHL(X , 2), MUL(X, 5)), When this function called
-/// with TopLevelOpcode == Instruction::Add and Op = SHL(X, 2), transforms
-/// SHL(X, 2) to MUL(X, 4) i.e. returns Instruction::Mul with LHS set to 'X' and
-/// RHS to 4.
-static Instruction::BinaryOps
-getBinOpsForFactorization(Instruction::BinaryOps TopLevelOpcode,
- BinaryOperator *Op, Value *&LHS, Value *&RHS) {
- assert(Op && "Expected a binary operator");
-
- LHS = Op->getOperand(0);
- RHS = Op->getOperand(1);
-
- switch (TopLevelOpcode) {
- default:
- return Op->getOpcode();
-
- case Instruction::Add:
- case Instruction::Sub:
- if (Op->getOpcode() == Instruction::Shl) {
- if (Constant *CST = dyn_cast<Constant>(Op->getOperand(1))) {
- // The multiplier is really 1 << CST.
- RHS = ConstantExpr::getShl(ConstantInt::get(Op->getType(), 1), CST);
- return Instruction::Mul;
- }
- }
- return Op->getOpcode();
- }
-
- // TODO: We can add other conversions e.g. shr => div etc.
-}
-
-/// This tries to simplify binary operations by factorizing out common terms
-/// (e. g. "(A*B)+(A*C)" -> "A*(B+C)").
-Value *InstCombiner::tryFactorization(BinaryOperator &I,
- Instruction::BinaryOps InnerOpcode,
- Value *A, Value *B, Value *C, Value *D) {
- assert(A && B && C && D && "All values must be provided");
-
- Value *V = nullptr;
- Value *SimplifiedInst = nullptr;
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
-
- // Does "X op' Y" always equal "Y op' X"?
- bool InnerCommutative = Instruction::isCommutative(InnerOpcode);
-
- // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
- if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
- // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
- // commutative case, "(A op' B) op (C op' A)"?
- if (A == C || (InnerCommutative && A == D)) {
- if (A != C)
- std::swap(C, D);
- // Consider forming "A op' (B op D)".
- // If "B op D" simplifies then it can be formed with no cost.
- V = SimplifyBinOp(TopLevelOpcode, B, D, SQ.getWithInstruction(&I));
- // If "B op D" doesn't simplify then only go on if both of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && LHS->hasOneUse() && RHS->hasOneUse())
- V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
- if (V) {
- SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);
- }
- }
-
- // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
- if (!SimplifiedInst && RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
- // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
- // commutative case, "(A op' B) op (B op' D)"?
- if (B == D || (InnerCommutative && B == C)) {
- if (B != D)
- std::swap(C, D);
- // Consider forming "(A op C) op' B".
- // If "A op C" simplifies then it can be formed with no cost.
- V = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
-
- // If "A op C" doesn't simplify then only go on if both of the existing
- // operations "A op' B" and "C op' D" will be zapped as no longer used.
- if (!V && LHS->hasOneUse() && RHS->hasOneUse())
- V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
- if (V) {
- SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);
- }
- }
-
- if (SimplifiedInst) {
- ++NumFactor;
- SimplifiedInst->takeName(&I);
-
- // Check if we can add NSW flag to SimplifiedInst. If so, set NSW flag.
- // TODO: Check for NUW.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SimplifiedInst)) {
- if (isa<OverflowingBinaryOperator>(SimplifiedInst)) {
- bool HasNSW = false;
- if (isa<OverflowingBinaryOperator>(&I))
- HasNSW = I.hasNoSignedWrap();
-
- if (auto *LOBO = dyn_cast<OverflowingBinaryOperator>(LHS))
- HasNSW &= LOBO->hasNoSignedWrap();
-
- if (auto *ROBO = dyn_cast<OverflowingBinaryOperator>(RHS))
- HasNSW &= ROBO->hasNoSignedWrap();
-
- // We can propagate 'nsw' if we know that
- // %Y = mul nsw i16 %X, C
- // %Z = add nsw i16 %Y, %X
- // =>
- // %Z = mul nsw i16 %X, C+1
- //
- // iff C+1 isn't INT_MIN
- const APInt *CInt;
- if (TopLevelOpcode == Instruction::Add &&
- InnerOpcode == Instruction::Mul)
- if (match(V, m_APInt(CInt)) && !CInt->isMinSignedValue())
- BO->setHasNoSignedWrap(HasNSW);
- }
- }
- }
- return SimplifiedInst;
-}
-
-/// This tries to simplify binary operations which some other binary operation
-/// distributes over either by factorizing out common terms
-/// (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this results in
-/// simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is a win).
-/// Returns the simplified value, or null if it didn't simplify.
-Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
- BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
- Instruction::BinaryOps TopLevelOpcode = I.getOpcode();
-
- {
- // Factorization.
- Value *A, *B, *C, *D;
- Instruction::BinaryOps LHSOpcode, RHSOpcode;
- if (Op0)
- LHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op0, A, B);
- if (Op1)
- RHSOpcode = getBinOpsForFactorization(TopLevelOpcode, Op1, C, D);
-
- // The instruction has the form "(A op' B) op (C op' D)". Try to factorize
- // a common term.
- if (Op0 && Op1 && LHSOpcode == RHSOpcode)
- if (Value *V = tryFactorization(I, LHSOpcode, A, B, C, D))
- return V;
-
- // The instruction has the form "(A op' B) op (C)". Try to factorize common
- // term.
- if (Op0)
- if (Value *Ident = getIdentityValue(LHSOpcode, RHS))
- if (Value *V =
- tryFactorization(I, LHSOpcode, A, B, RHS, Ident))
- return V;
-
- // The instruction has the form "(B) op (C op' D)". Try to factorize common
- // term.
- if (Op1)
- if (Value *Ident = getIdentityValue(RHSOpcode, LHS))
- if (Value *V =
- tryFactorization(I, RHSOpcode, LHS, Ident, C, D))
- return V;
- }
-
- // Expansion.
- if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
- // The instruction has the form "(A op' B) op C". See if expanding it out
- // to "(A op C) op' (B op C)" results in simplifications.
- Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
- Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
-
- Value *L = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
- Value *R = SimplifyBinOp(TopLevelOpcode, B, C, SQ.getWithInstruction(&I));
-
- // Do "A op C" and "B op C" both simplify?
- if (L && R) {
- // They do! Return "L op' R".
- ++NumExpand;
- C = Builder.CreateBinOp(InnerOpcode, L, R);
- C->takeName(&I);
- return C;
- }
-
- // Does "A op C" simplify to the identity value for the inner opcode?
- if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
- // They do! Return "B op C".
- ++NumExpand;
- C = Builder.CreateBinOp(TopLevelOpcode, B, C);
- C->takeName(&I);
- return C;
- }
-
- // Does "B op C" simplify to the identity value for the inner opcode?
- if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
- // They do! Return "A op C".
- ++NumExpand;
- C = Builder.CreateBinOp(TopLevelOpcode, A, C);
- C->takeName(&I);
- return C;
- }
- }
-
- if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
- // The instruction has the form "A op (B op' C)". See if expanding it out
- // to "(A op B) op' (A op C)" results in simplifications.
- Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
- Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
-
- Value *L = SimplifyBinOp(TopLevelOpcode, A, B, SQ.getWithInstruction(&I));
- Value *R = SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I));
-
- // Do "A op B" and "A op C" both simplify?
- if (L && R) {
- // They do! Return "L op' R".
- ++NumExpand;
- A = Builder.CreateBinOp(InnerOpcode, L, R);
- A->takeName(&I);
- return A;
- }
-
- // Does "A op B" simplify to the identity value for the inner opcode?
- if (L && L == ConstantExpr::getBinOpIdentity(InnerOpcode, L->getType())) {
- // They do! Return "A op C".
- ++NumExpand;
- A = Builder.CreateBinOp(TopLevelOpcode, A, C);
- A->takeName(&I);
- return A;
- }
-
- // Does "A op C" simplify to the identity value for the inner opcode?
- if (R && R == ConstantExpr::getBinOpIdentity(InnerOpcode, R->getType())) {
- // They do! Return "A op B".
- ++NumExpand;
- A = Builder.CreateBinOp(TopLevelOpcode, A, B);
- A->takeName(&I);
- return A;
- }
- }
-
- // (op (select (a, c, b)), (select (a, d, b))) -> (select (a, (op c, d), 0))
- // (op (select (a, b, c)), (select (a, b, d))) -> (select (a, 0, (op c, d)))
- if (auto *SI0 = dyn_cast<SelectInst>(LHS)) {
- if (auto *SI1 = dyn_cast<SelectInst>(RHS)) {
- if (SI0->getCondition() == SI1->getCondition()) {
- Value *SI = nullptr;
- if (Value *V =
- SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
- SI1->getFalseValue(), SQ.getWithInstruction(&I)))
- SI = Builder.CreateSelect(SI0->getCondition(),
- Builder.CreateBinOp(TopLevelOpcode,
- SI0->getTrueValue(),
- SI1->getTrueValue()),
- V);
- if (Value *V =
- SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
- SI1->getTrueValue(), SQ.getWithInstruction(&I)))
- SI = Builder.CreateSelect(
- SI0->getCondition(), V,
- Builder.CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
- SI1->getFalseValue()));
- if (SI) {
- SI->takeName(&I);
- return SI;
- }
- }
- }
- }
-
- return nullptr;
-}
-
-/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
-/// constant zero (which is the 'negate' form).
-Value *InstCombiner::dyn_castNegVal(Value *V) const {
- if (BinaryOperator::isNeg(V))
- return BinaryOperator::getNegArgument(V);
-
- // Constants can be considered to be negated values if they can be folded.
- if (ConstantInt *C = dyn_cast<ConstantInt>(V))
- return ConstantExpr::getNeg(C);
-
- if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
- if (C->getType()->getElementType()->isIntegerTy())
- return ConstantExpr::getNeg(C);
-
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
- for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
- Constant *Elt = CV->getAggregateElement(i);
- if (!Elt)
- return nullptr;
-
- if (isa<UndefValue>(Elt))
- continue;
-
- if (!isa<ConstantInt>(Elt))
- return nullptr;
- }
- return ConstantExpr::getNeg(CV);
- }
-
- return nullptr;
-}
-
-/// Given a 'fsub' instruction, return the RHS of the instruction if the LHS is
-/// a constant negative zero (which is the 'negate' form).
-Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
- if (BinaryOperator::isFNeg(V, IgnoreZeroSign))
- return BinaryOperator::getFNegArgument(V);
-
- // Constants can be considered to be negated values if they can be folded.
- if (ConstantFP *C = dyn_cast<ConstantFP>(V))
- return ConstantExpr::getFNeg(C);
-
- if (ConstantDataVector *C = dyn_cast<ConstantDataVector>(V))
- if (C->getType()->getElementType()->isFloatingPointTy())
- return ConstantExpr::getFNeg(C);
-
- return nullptr;
-}
-
-static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
- InstCombiner::BuilderTy &Builder) {
- if (auto *Cast = dyn_cast<CastInst>(&I))
- return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());
-
- assert(I.isBinaryOp() && "Unexpected opcode for select folding");
-
- // Figure out if the constant is the left or the right argument.
- bool ConstIsRHS = isa<Constant>(I.getOperand(1));
- Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
-
- if (auto *SOC = dyn_cast<Constant>(SO)) {
- if (ConstIsRHS)
- return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
- return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
- }
-
- Value *Op0 = SO, *Op1 = ConstOperand;
- if (!ConstIsRHS)
- std::swap(Op0, Op1);
-
- auto *BO = cast<BinaryOperator>(&I);
- Value *RI = Builder.CreateBinOp(BO->getOpcode(), Op0, Op1,
- SO->getName() + ".op");
- auto *FPInst = dyn_cast<Instruction>(RI);
- if (FPInst && isa<FPMathOperator>(FPInst))
- FPInst->copyFastMathFlags(BO);
- return RI;
-}
-
-Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
- // Don't modify shared select instructions.
- if (!SI->hasOneUse())
- return nullptr;
-
- Value *TV = SI->getTrueValue();
- Value *FV = SI->getFalseValue();
- if (!(isa<Constant>(TV) || isa<Constant>(FV)))
- return nullptr;
-
- // Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isIntOrIntVectorTy(1))
- return nullptr;
-
- // If it's a bitcast involving vectors, make sure it has the same number of
- // elements on both sides.
- if (auto *BC = dyn_cast<BitCastInst>(&Op)) {
- VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
- VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
-
- // Verify that either both or neither are vectors.
- if ((SrcTy == nullptr) != (DestTy == nullptr))
- return nullptr;
-
- // If vectors, verify that they have the same number of elements.
- if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
- return nullptr;
- }
-
- // Test if a CmpInst instruction is used exclusively by a select as
- // part of a minimum or maximum operation. If so, refrain from doing
- // any other folding. This helps out other analyses which understand
- // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
- // and CodeGen. And in this case, at least one of the comparison
- // operands has at least one user besides the compare (the select),
- // which would often largely negate the benefit of folding anyway.
- if (auto *CI = dyn_cast<CmpInst>(SI->getCondition())) {
- if (CI->hasOneUse()) {
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
- if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
- (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
- return nullptr;
- }
- }
-
- Value *NewTV = foldOperationIntoSelectOperand(Op, TV, Builder);
- Value *NewFV = foldOperationIntoSelectOperand(Op, FV, Builder);
- return SelectInst::Create(SI->getCondition(), NewTV, NewFV, "", nullptr, SI);
-}
-
-static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
- InstCombiner::BuilderTy &Builder) {
- bool ConstIsRHS = isa<Constant>(I->getOperand(1));
- Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));
-
- if (auto *InC = dyn_cast<Constant>(InV)) {
- if (ConstIsRHS)
- return ConstantExpr::get(I->getOpcode(), InC, C);
- return ConstantExpr::get(I->getOpcode(), C, InC);
- }
-
- Value *Op0 = InV, *Op1 = C;
- if (!ConstIsRHS)
- std::swap(Op0, Op1);
-
- Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp");
- auto *FPInst = dyn_cast<Instruction>(RI);
- if (FPInst && isa<FPMathOperator>(FPInst))
- FPInst->copyFastMathFlags(I);
- return RI;
-}
-
-Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
- unsigned NumPHIValues = PN->getNumIncomingValues();
- if (NumPHIValues == 0)
- return nullptr;
-
- // We normally only transform phis with a single use. However, if a PHI has
- // multiple uses and they are all the same operation, we can fold *all* of the
- // uses into the PHI.
- if (!PN->hasOneUse()) {
- // Walk the use list for the instruction, comparing them to I.
- for (User *U : PN->users()) {
- Instruction *UI = cast<Instruction>(U);
- if (UI != &I && !I.isIdenticalTo(UI))
- return nullptr;
- }
- // Otherwise, we can replace *all* users with the new PHI we form.
- }
-
- // Check to see if all of the operands of the PHI are simple constants
- // (constantint/constantfp/undef). If there is one non-constant value,
- // remember the BB it is in. If there is more than one or if *it* is a PHI,
- // bail out. We don't do arbitrary constant expressions here because moving
- // their computation can be expensive without a cost model.
- BasicBlock *NonConstBB = nullptr;
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InVal = PN->getIncomingValue(i);
- if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
- continue;
-
- if (isa<PHINode>(InVal)) return nullptr; // Itself a phi.
- if (NonConstBB) return nullptr; // More than one non-const value.
-
- NonConstBB = PN->getIncomingBlock(i);
-
- // If the InVal is an invoke at the end of the pred block, then we can't
- // insert a computation after it without breaking the edge.
- if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
- if (II->getParent() == NonConstBB)
- return nullptr;
-
- // If the incoming non-constant value is in I's block, we will remove one
- // instruction, but insert another equivalent one, leading to infinite
- // instcombine.
- if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI))
- return nullptr;
- }
-
- // If there is exactly one non-constant value, we can insert a copy of the
- // operation in that block. However, if this is a critical edge, we would be
- // inserting the computation on some other paths (e.g. inside a loop). Only
- // do this if the pred block is unconditionally branching into the phi block.
- if (NonConstBB != nullptr) {
- BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
- if (!BI || !BI->isUnconditional()) return nullptr;
- }
-
- // Okay, we can do the transformation: create the new PHI node.
- PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
- InsertNewInstBefore(NewPN, *PN);
- NewPN->takeName(PN);
-
- // If we are going to have to insert a new computation, do so right before the
- // predecessor's terminator.
- if (NonConstBB)
- Builder.SetInsertPoint(NonConstBB->getTerminator());
-
- // Next, add all of the operands to the PHI.
- if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
- // We only currently try to fold the condition of a select when it is a phi,
- // not the true/false values.
- Value *TrueV = SI->getTrueValue();
- Value *FalseV = SI->getFalseValue();
- BasicBlock *PhiTransBB = PN->getParent();
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- BasicBlock *ThisBB = PN->getIncomingBlock(i);
- Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
- Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
- Value *InV = nullptr;
- // Beware of ConstantExpr: it may eventually evaluate to getNullValue,
- // even if currently isNullValue gives false.
- Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
- // For vector constants, we cannot use isNullValue to fold into
- // FalseVInPred versus TrueVInPred. When we have individual nonzero
- // elements in the vector, we will incorrectly fold InC to
- // `TrueVInPred`.
- if (InC && !isa<ConstantExpr>(InC) && isa<ConstantInt>(InC))
- InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
- else {
- // Generate the select in the same block as PN's current incoming block.
- // Note: ThisBB need not be the NonConstBB because vector constants
- // which are constants by definition are handled here.
- // FIXME: This can lead to an increase in IR generation because we might
- // generate selects for vector constant phi operand, that could not be
- // folded to TrueVInPred or FalseVInPred as done for ConstantInt. For
- // non-vector phis, this transformation was always profitable because
- // the select would be generated exactly once in the NonConstBB.
- Builder.SetInsertPoint(ThisBB->getTerminator());
- InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred,
- FalseVInPred, "phitmp");
- }
- NewPN->addIncoming(InV, ThisBB);
- }
- } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
- Constant *C = cast<Constant>(I.getOperand(1));
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = nullptr;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
- InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
- else if (isa<ICmpInst>(CI))
- InV = Builder.CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
- C, "phitmp");
- else
- InV = Builder.CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
- C, "phitmp");
- NewPN->addIncoming(InV, PN->getIncomingBlock(i));
- }
- } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = foldOperationIntoPhiValue(BO, PN->getIncomingValue(i),
- Builder);
- NewPN->addIncoming(InV, PN->getIncomingBlock(i));
- }
- } else {
- CastInst *CI = cast<CastInst>(&I);
- Type *RetTy = CI->getType();
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
- InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
- else
- InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i),
- I.getType(), "phitmp");
- NewPN->addIncoming(InV, PN->getIncomingBlock(i));
- }
- }
-
- for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
- Instruction *User = cast<Instruction>(*UI++);
- if (User == &I) continue;
- replaceInstUsesWith(*User, NewPN);
- eraseInstFromFunction(*User);
- }
- return replaceInstUsesWith(I, NewPN);
-}
-
-Instruction *InstCombiner::foldOpWithConstantIntoOperand(BinaryOperator &I) {
- assert(isa<Constant>(I.getOperand(1)) && "Unexpected operand type");
-
- if (auto *Sel = dyn_cast<SelectInst>(I.getOperand(0))) {
- if (Instruction *NewSel = FoldOpIntoSelect(I, Sel))
- return NewSel;
- } else if (auto *PN = dyn_cast<PHINode>(I.getOperand(0))) {
- if (Instruction *NewPhi = foldOpIntoPhi(I, PN))
- return NewPhi;
- }
- return nullptr;
-}
-
-/// Given a pointer type and a constant offset, determine whether or not there
-/// is a sequence of GEP indices into the pointed type that will land us at the
-/// specified offset. If so, fill them into NewIndices and return the resultant
-/// element type, otherwise return null.
-Type *InstCombiner::FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
- SmallVectorImpl<Value *> &NewIndices) {
- Type *Ty = PtrTy->getElementType();
- if (!Ty->isSized())
- return nullptr;
-
- // Start with the index over the outer type. Note that the type size
- // might be zero (even if the offset isn't zero) if the indexed type
- // is something like [0 x {int, int}]
- Type *IntPtrTy = DL.getIntPtrType(PtrTy);
- int64_t FirstIdx = 0;
- if (int64_t TySize = DL.getTypeAllocSize(Ty)) {
- FirstIdx = Offset/TySize;
- Offset -= FirstIdx*TySize;
-
- // Handle hosts where % returns negative instead of values [0..TySize).
- if (Offset < 0) {
- --FirstIdx;
- Offset += TySize;
- assert(Offset >= 0);
- }
- assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
- }
-
- NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
-
- // Index into the types. If we fail, set OrigBase to null.
- while (Offset) {
- // Indexing into tail padding between struct/array elements.
- if (uint64_t(Offset * 8) >= DL.getTypeSizeInBits(Ty))
- return nullptr;
-
- if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = DL.getStructLayout(STy);
- assert(Offset < (int64_t)SL->getSizeInBytes() &&
- "Offset must stay within the indexed type");
-
- unsigned Elt = SL->getElementContainingOffset(Offset);
- NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
- Elt));
-
- Offset -= SL->getElementOffset(Elt);
- Ty = STy->getElementType(Elt);
- } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- uint64_t EltSize = DL.getTypeAllocSize(AT->getElementType());
- assert(EltSize && "Cannot index into a zero-sized array");
- NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
- Offset %= EltSize;
- Ty = AT->getElementType();
- } else {
- // Otherwise, we can't index into the middle of this atomic type, bail.
- return nullptr;
- }
- }
-
- return Ty;
-}
-
-static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
- // If this GEP has only 0 indices, it is the same pointer as
- // Src. If Src is not a trivial GEP too, don't combine
- // the indices.
- if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
- !Src.hasOneUse())
- return false;
- return true;
-}
-
-/// Return a value X such that Val = X * Scale, or null if none.
-/// If the multiplication is known not to overflow, then NoSignedWrap is set.
-Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
- assert(isa<IntegerType>(Val->getType()) && "Can only descale integers!");
- assert(cast<IntegerType>(Val->getType())->getBitWidth() ==
- Scale.getBitWidth() && "Scale not compatible with value!");
-
- // If Val is zero or Scale is one then Val = Val * Scale.
- if (match(Val, m_Zero()) || Scale == 1) {
- NoSignedWrap = true;
- return Val;
- }
-
- // If Scale is zero then it does not divide Val.
- if (Scale.isMinValue())
- return nullptr;
-
- // Look through chains of multiplications, searching for a constant that is
- // divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4
- // will find the constant factor 4 and produce X*(Y*Z). Descaling X*(Y*8) by
- // a factor of 4 will produce X*(Y*2). The principle of operation is to bore
- // down from Val:
- //
- // Val = M1 * X || Analysis starts here and works down
- // M1 = M2 * Y || Doesn't descend into terms with more
- // M2 = Z * 4 \/ than one use
- //
- // Then to modify a term at the bottom:
- //
- // Val = M1 * X
- // M1 = Z * Y || Replaced M2 with Z
- //
- // Then to work back up correcting nsw flags.
-
- // Op - the term we are currently analyzing. Starts at Val then drills down.
- // Replaced with its descaled value before exiting from the drill down loop.
- Value *Op = Val;
-
- // Parent - initially null, but after drilling down notes where Op came from.
- // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the
- // 0'th operand of Val.
- std::pair<Instruction*, unsigned> Parent;
-
- // Set if the transform requires a descaling at deeper levels that doesn't
- // overflow.
- bool RequireNoSignedWrap = false;
-
- // Log base 2 of the scale. Negative if not a power of 2.
- int32_t logScale = Scale.exactLogBase2();
-
- for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- // If Op is a constant divisible by Scale then descale to the quotient.
- APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth.
- APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
- if (!Remainder.isMinValue())
- // Not divisible by Scale.
- return nullptr;
- // Replace with the quotient in the parent.
- Op = ConstantInt::get(CI->getType(), Quotient);
- NoSignedWrap = true;
- break;
- }
-
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op)) {
-
- if (BO->getOpcode() == Instruction::Mul) {
- // Multiplication.
- NoSignedWrap = BO->hasNoSignedWrap();
- if (RequireNoSignedWrap && !NoSignedWrap)
- return nullptr;
-
- // There are three cases for multiplication: multiplication by exactly
- // the scale, multiplication by a constant different to the scale, and
- // multiplication by something else.
- Value *LHS = BO->getOperand(0);
- Value *RHS = BO->getOperand(1);
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
- // Multiplication by a constant.
- if (CI->getValue() == Scale) {
- // Multiplication by exactly the scale, replace the multiplication
- // by its left-hand side in the parent.
- Op = LHS;
- break;
- }
-
- // Otherwise drill down into the constant.
- if (!Op->hasOneUse())
- return nullptr;
-
- Parent = std::make_pair(BO, 1);
- continue;
- }
-
- // Multiplication by something else. Drill down into the left-hand side
- // since that's where the reassociate pass puts the good stuff.
- if (!Op->hasOneUse())
- return nullptr;
-
- Parent = std::make_pair(BO, 0);
- continue;
- }
-
- if (logScale > 0 && BO->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(BO->getOperand(1))) {
- // Multiplication by a power of 2.
- NoSignedWrap = BO->hasNoSignedWrap();
- if (RequireNoSignedWrap && !NoSignedWrap)
- return nullptr;
-
- Value *LHS = BO->getOperand(0);
- int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
- getLimitedValue(Scale.getBitWidth());
- // Op = LHS << Amt.
-
- if (Amt == logScale) {
- // Multiplication by exactly the scale, replace the multiplication
- // by its left-hand side in the parent.
- Op = LHS;
- break;
- }
- if (Amt < logScale || !Op->hasOneUse())
- return nullptr;
-
- // Multiplication by more than the scale. Reduce the multiplying amount
- // by the scale in the parent.
- Parent = std::make_pair(BO, 1);
- Op = ConstantInt::get(BO->getType(), Amt - logScale);
- break;
- }
- }
-
- if (!Op->hasOneUse())
- return nullptr;
-
- if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
- if (Cast->getOpcode() == Instruction::SExt) {
- // Op is sign-extended from a smaller type, descale in the smaller type.
- unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
- APInt SmallScale = Scale.trunc(SmallSize);
- // Suppose Op = sext X, and we descale X as Y * SmallScale. We want to
- // descale Op as (sext Y) * Scale. In order to have
- // sext (Y * SmallScale) = (sext Y) * Scale
- // some conditions need to hold however: SmallScale must sign-extend to
- // Scale and the multiplication Y * SmallScale should not overflow.
- if (SmallScale.sext(Scale.getBitWidth()) != Scale)
- // SmallScale does not sign-extend to Scale.
- return nullptr;
- assert(SmallScale.exactLogBase2() == logScale);
- // Require that Y * SmallScale must not overflow.
- RequireNoSignedWrap = true;
-
- // Drill down through the cast.
- Parent = std::make_pair(Cast, 0);
- Scale = SmallScale;
- continue;
- }
-
- if (Cast->getOpcode() == Instruction::Trunc) {
- // Op is truncated from a larger type, descale in the larger type.
- // Suppose Op = trunc X, and we descale X as Y * sext Scale. Then
- // trunc (Y * sext Scale) = (trunc Y) * Scale
- // always holds. However (trunc Y) * Scale may overflow even if
- // trunc (Y * sext Scale) does not, so nsw flags need to be cleared
- // from this point up in the expression (see later).
- if (RequireNoSignedWrap)
- return nullptr;
-
- // Drill down through the cast.
- unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
- Parent = std::make_pair(Cast, 0);
- Scale = Scale.sext(LargeSize);
- if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits())
- logScale = -1;
- assert(Scale.exactLogBase2() == logScale);
- continue;
- }
- }
-
- // Unsupported expression, bail out.
- return nullptr;
- }
-
- // If Op is zero then Val = Op * Scale.
- if (match(Op, m_Zero())) {
- NoSignedWrap = true;
- return Op;
- }
-
- // We know that we can successfully descale, so from here on we can safely
- // modify the IR. Op holds the descaled version of the deepest term in the
- // expression. NoSignedWrap is 'true' if multiplying Op by Scale is known
- // not to overflow.
-
- if (!Parent.first)
- // The expression only had one term.
- return Op;
-
- // Rewrite the parent using the descaled version of its operand.
- assert(Parent.first->hasOneUse() && "Drilled down when more than one use!");
- assert(Op != Parent.first->getOperand(Parent.second) &&
- "Descaling was a no-op?");
- Parent.first->setOperand(Parent.second, Op);
- Worklist.Add(Parent.first);
-
- // Now work back up the expression correcting nsw flags. The logic is based
- // on the following observation: if X * Y is known not to overflow as a signed
- // multiplication, and Y is replaced by a value Z with smaller absolute value,
- // then X * Z will not overflow as a signed multiplication either. As we work
- // our way up, having NoSignedWrap 'true' means that the descaled value at the
- // current level has strictly smaller absolute value than the original.
- Instruction *Ancestor = Parent.first;
- do {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Ancestor)) {
- // If the multiplication wasn't nsw then we can't say anything about the
- // value of the descaled multiplication, and we have to clear nsw flags
- // from this point on up.
- bool OpNoSignedWrap = BO->hasNoSignedWrap();
- NoSignedWrap &= OpNoSignedWrap;
- if (NoSignedWrap != OpNoSignedWrap) {
- BO->setHasNoSignedWrap(NoSignedWrap);
- Worklist.Add(Ancestor);
- }
- } else if (Ancestor->getOpcode() == Instruction::Trunc) {
- // The fact that the descaled input to the trunc has smaller absolute
- // value than the original input doesn't tell us anything useful about
- // the absolute values of the truncations.
- NoSignedWrap = false;
- }
- assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) &&
- "Failed to keep proper track of nsw flags while drilling down?");
-
- if (Ancestor == Val)
- // Got to the top, all done!
- return Val;
-
- // Move up one level in the expression.
- assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
- Ancestor = Ancestor->user_back();
- } while (1);
-}
-
-/// \brief Creates node of binary operation with the same attributes as the
-/// specified one but with other operands.
-static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
- InstCombiner::BuilderTy &B) {
- Value *BO = B.CreateBinOp(Inst.getOpcode(), LHS, RHS);
- // If LHS and RHS are constant, BO won't be a binary operator.
- if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO))
- NewBO->copyIRFlags(&Inst);
- return BO;
-}
-
-/// \brief Makes transformation of binary operation specific for vector types.
-/// \param Inst Binary operator to transform.
-/// \return Pointer to node that must replace the original binary operator, or
-/// null pointer if no transformation was made.
-Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
- if (!Inst.getType()->isVectorTy()) return nullptr;
-
- // It may not be safe to reorder shuffles and things like div, urem, etc.
- // because we may trap when executing those ops on unknown vector elements.
- // See PR20059.
- if (!isSafeToSpeculativelyExecute(&Inst))
- return nullptr;
-
- unsigned VWidth = cast<VectorType>(Inst.getType())->getNumElements();
- Value *LHS = Inst.getOperand(0), *RHS = Inst.getOperand(1);
- assert(cast<VectorType>(LHS->getType())->getNumElements() == VWidth);
- assert(cast<VectorType>(RHS->getType())->getNumElements() == VWidth);
-
- // If both arguments of the binary operation are shuffles that use the same
- // mask and shuffle within a single vector, move the shuffle after the binop:
- // Op(shuffle(v1, m), shuffle(v2, m)) -> shuffle(Op(v1, v2), m)
- auto *LShuf = dyn_cast<ShuffleVectorInst>(LHS);
- auto *RShuf = dyn_cast<ShuffleVectorInst>(RHS);
- if (LShuf && RShuf && LShuf->getMask() == RShuf->getMask() &&
- isa<UndefValue>(LShuf->getOperand(1)) &&
- isa<UndefValue>(RShuf->getOperand(1)) &&
- LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType()) {
- Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
- RShuf->getOperand(0), Builder);
- return Builder.CreateShuffleVector(
- NewBO, UndefValue::get(NewBO->getType()), LShuf->getMask());
- }
-
- // If one argument is a shuffle within one vector, the other is a constant,
- // try moving the shuffle after the binary operation.
- ShuffleVectorInst *Shuffle = nullptr;
- Constant *C1 = nullptr;
- if (isa<ShuffleVectorInst>(LHS)) Shuffle = cast<ShuffleVectorInst>(LHS);
- if (isa<ShuffleVectorInst>(RHS)) Shuffle = cast<ShuffleVectorInst>(RHS);
- if (isa<Constant>(LHS)) C1 = cast<Constant>(LHS);
- if (isa<Constant>(RHS)) C1 = cast<Constant>(RHS);
- if (Shuffle && C1 &&
- (isa<ConstantVector>(C1) || isa<ConstantDataVector>(C1)) &&
- isa<UndefValue>(Shuffle->getOperand(1)) &&
- Shuffle->getType() == Shuffle->getOperand(0)->getType()) {
- SmallVector<int, 16> ShMask = Shuffle->getShuffleMask();
- // Find constant C2 that has property:
- // shuffle(C2, ShMask) = C1
- // If such constant does not exist (example: ShMask=<0,0> and C1=<1,2>)
- // reorder is not possible.
- SmallVector<Constant*, 16> C2M(VWidth,
- UndefValue::get(C1->getType()->getScalarType()));
- bool MayChange = true;
- for (unsigned I = 0; I < VWidth; ++I) {
- if (ShMask[I] >= 0) {
- assert(ShMask[I] < (int)VWidth);
- if (!isa<UndefValue>(C2M[ShMask[I]])) {
- MayChange = false;
- break;
- }
- C2M[ShMask[I]] = C1->getAggregateElement(I);
- }
- }
- if (MayChange) {
- Constant *C2 = ConstantVector::get(C2M);
- Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0);
- Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;
- Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
- return Builder.CreateShuffleVector(NewBO,
- UndefValue::get(Inst.getType()), Shuffle->getMask());
- }
- }
-
- return nullptr;
-}
-
-Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
- SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
-
- if (Value *V = SimplifyGEPInst(GEP.getSourceElementType(), Ops,
- SQ.getWithInstruction(&GEP)))
- return replaceInstUsesWith(GEP, V);
-
- Value *PtrOp = GEP.getOperand(0);
-
- // Eliminate unneeded casts for indices, and replace indices which displace
- // by multiples of a zero size type with zero.
- bool MadeChange = false;
- Type *IntPtrTy =
- DL.getIntPtrType(GEP.getPointerOperandType()->getScalarType());
-
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
- ++I, ++GTI) {
- // Skip indices into struct types.
- if (GTI.isStruct())
- continue;
-
- // Index type should have the same width as IntPtr
- Type *IndexTy = (*I)->getType();
- Type *NewIndexType = IndexTy->isVectorTy() ?
- VectorType::get(IntPtrTy, IndexTy->getVectorNumElements()) : IntPtrTy;
-
- // If the element type has zero size then any index over it is equivalent
- // to an index of zero, so replace it with zero if it is not zero already.
- Type *EltTy = GTI.getIndexedType();
- if (EltTy->isSized() && DL.getTypeAllocSize(EltTy) == 0)
- if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
- *I = Constant::getNullValue(NewIndexType);
- MadeChange = true;
- }
-
- if (IndexTy != NewIndexType) {
- // If we are using a wider index than needed for this platform, shrink
- // it to what we need. If narrower, sign-extend it to what we need.
- // This explicit cast can make subsequent optimizations more obvious.
- *I = Builder.CreateIntCast(*I, NewIndexType, true);
- MadeChange = true;
- }
- }
- if (MadeChange)
- return &GEP;
-
- // Check to see if the inputs to the PHI node are getelementptr instructions.
- if (PHINode *PN = dyn_cast<PHINode>(PtrOp)) {
- GetElementPtrInst *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
- if (!Op1)
- return nullptr;
-
- // Don't fold a GEP into itself through a PHI node. This can only happen
- // through the back-edge of a loop. Folding a GEP into itself means that
- // the value of the previous iteration needs to be stored in the meantime,
- // thus requiring an additional register variable to be live, but not
- // actually achieving anything (the GEP still needs to be executed once per
- // loop iteration).
- if (Op1 == &GEP)
- return nullptr;
-
- int DI = -1;
-
- for (auto I = PN->op_begin()+1, E = PN->op_end(); I !=E; ++I) {
- GetElementPtrInst *Op2 = dyn_cast<GetElementPtrInst>(*I);
- if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands())
- return nullptr;
-
- // As for Op1 above, don't try to fold a GEP into itself.
- if (Op2 == &GEP)
- return nullptr;
-
- // Keep track of the type as we walk the GEP.
- Type *CurTy = nullptr;
-
- for (unsigned J = 0, F = Op1->getNumOperands(); J != F; ++J) {
- if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
- return nullptr;
-
- if (Op1->getOperand(J) != Op2->getOperand(J)) {
- if (DI == -1) {
- // We have not seen any differences yet in the GEPs feeding the
- // PHI yet, so we record this one if it is allowed to be a
- // variable.
-
- // The first two arguments can vary for any GEP, the rest have to be
- // static for struct slots
- if (J > 1 && CurTy->isStructTy())
- return nullptr;
-
- DI = J;
- } else {
- // The GEP is different by more than one input. While this could be
- // extended to support GEPs that vary by more than one variable it
- // doesn't make sense since it greatly increases the complexity and
- // would result in an R+R+R addressing mode which no backend
- // directly supports and would need to be broken into several
- // simpler instructions anyway.
- return nullptr;
- }
- }
-
- // Sink down a layer of the type for the next iteration.
- if (J > 0) {
- if (J == 1) {
- CurTy = Op1->getSourceElementType();
- } else if (CompositeType *CT = dyn_cast<CompositeType>(CurTy)) {
- CurTy = CT->getTypeAtIndex(Op1->getOperand(J));
- } else {
- CurTy = nullptr;
- }
- }
- }
- }
-
- // If not all GEPs are identical we'll have to create a new PHI node.
- // Check that the old PHI node has only one use so that it will get
- // removed.
- if (DI != -1 && !PN->hasOneUse())
- return nullptr;
-
- GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(Op1->clone());
- if (DI == -1) {
- // All the GEPs feeding the PHI are identical. Clone one down into our
- // BB so that it can be merged with the current GEP.
- GEP.getParent()->getInstList().insert(
- GEP.getParent()->getFirstInsertionPt(), NewGEP);
- } else {
- // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
- // into the current block so it can be merged, and create a new PHI to
- // set that index.
- PHINode *NewPN;
- {
- IRBuilderBase::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(PN);
- NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
- PN->getNumOperands());
- }
-
- for (auto &I : PN->operands())
- NewPN->addIncoming(cast<GEPOperator>(I)->getOperand(DI),
- PN->getIncomingBlock(I));
-
- NewGEP->setOperand(DI, NewPN);
- GEP.getParent()->getInstList().insert(
- GEP.getParent()->getFirstInsertionPt(), NewGEP);
- NewGEP->setOperand(DI, NewPN);
- }
-
- GEP.setOperand(0, NewGEP);
- PtrOp = NewGEP;
- }
-
- // Combine Indices - If the source pointer to this getelementptr instruction
- // is a getelementptr instruction, combine the indices of the two
- // getelementptr instructions into a single instruction.
- //
- if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
- if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
- return nullptr;
-
- // Note that if our source is a gep chain itself then we wait for that
- // chain to be resolved before we perform this transformation. This
- // avoids us creating a TON of code in some cases.
- if (GEPOperator *SrcGEP =
- dyn_cast<GEPOperator>(Src->getOperand(0)))
- if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
- return nullptr; // Wait until our source is folded to completion.
-
- SmallVector<Value*, 8> Indices;
-
- // Find out whether the last index in the source GEP is a sequential idx.
- bool EndsWithSequential = false;
- for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
- I != E; ++I)
- EndsWithSequential = I.isSequential();
-
- // Can we combine the two pointer arithmetics offsets?
- if (EndsWithSequential) {
- // Replace: gep (gep %P, long B), long A, ...
- // With: T = long A+B; gep %P, T, ...
- //
- Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
- Value *GO1 = GEP.getOperand(1);
-
- // If they aren't the same type, then the input hasn't been processed
- // by the loop above yet (which canonicalizes sequential index types to
- // intptr_t). Just avoid transforming this until the input has been
- // normalized.
- if (SO1->getType() != GO1->getType())
- return nullptr;
-
- Value *Sum =
- SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
- // Only do the combine when we are sure the cost after the
- // merge is never more than that before the merge.
- if (Sum == nullptr)
- return nullptr;
-
- // Update the GEP in place if possible.
- if (Src->getNumOperands() == 2) {
- GEP.setOperand(0, Src->getOperand(0));
- GEP.setOperand(1, Sum);
- return &GEP;
- }
- Indices.append(Src->op_begin()+1, Src->op_end()-1);
- Indices.push_back(Sum);
- Indices.append(GEP.op_begin()+2, GEP.op_end());
- } else if (isa<Constant>(*GEP.idx_begin()) &&
- cast<Constant>(*GEP.idx_begin())->isNullValue() &&
- Src->getNumOperands() != 1) {
- // Otherwise we can do the fold if the first index of the GEP is a zero
- Indices.append(Src->op_begin()+1, Src->op_end());
- Indices.append(GEP.idx_begin()+1, GEP.idx_end());
- }
-
- if (!Indices.empty())
- return GEP.isInBounds() && Src->isInBounds()
- ? GetElementPtrInst::CreateInBounds(
- Src->getSourceElementType(), Src->getOperand(0), Indices,
- GEP.getName())
- : GetElementPtrInst::Create(Src->getSourceElementType(),
- Src->getOperand(0), Indices,
- GEP.getName());
- }
-
- if (GEP.getNumIndices() == 1) {
- unsigned AS = GEP.getPointerAddressSpace();
- if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
- DL.getPointerSizeInBits(AS)) {
- Type *Ty = GEP.getSourceElementType();
- uint64_t TyAllocSize = DL.getTypeAllocSize(Ty);
-
- bool Matched = false;
- uint64_t C;
- Value *V = nullptr;
- if (TyAllocSize == 1) {
- V = GEP.getOperand(1);
- Matched = true;
- } else if (match(GEP.getOperand(1),
- m_AShr(m_Value(V), m_ConstantInt(C)))) {
- if (TyAllocSize == 1ULL << C)
- Matched = true;
- } else if (match(GEP.getOperand(1),
- m_SDiv(m_Value(V), m_ConstantInt(C)))) {
- if (TyAllocSize == C)
- Matched = true;
- }
-
- if (Matched) {
- // Canonicalize (gep i8* X, -(ptrtoint Y))
- // to (inttoptr (sub (ptrtoint X), (ptrtoint Y)))
- // The GEP pattern is emitted by the SCEV expander for certain kinds of
- // pointer arithmetic.
- if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
- Operator *Index = cast<Operator>(V);
- Value *PtrToInt = Builder.CreatePtrToInt(PtrOp, Index->getType());
- Value *NewSub = Builder.CreateSub(PtrToInt, Index->getOperand(1));
- return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
- }
- // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
- // to (bitcast Y)
- Value *Y;
- if (match(V, m_Sub(m_PtrToInt(m_Value(Y)),
- m_PtrToInt(m_Specific(GEP.getOperand(0)))))) {
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(Y,
- GEP.getType());
- }
- }
- }
- }
-
- // We do not handle pointer-vector geps here.
- if (GEP.getType()->isVectorTy())
- return nullptr;
-
- // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
- Value *StrippedPtr = PtrOp->stripPointerCasts();
- PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
-
- if (StrippedPtr != PtrOp) {
- bool HasZeroPointerIndex = false;
- if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
- HasZeroPointerIndex = C->isZero();
-
- // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
- // into : GEP [10 x i8]* X, i32 0, ...
- //
- // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
- // into : GEP i8* X, ...
- //
- // This occurs when the program declares an array extern like "int X[];"
- if (HasZeroPointerIndex) {
- if (ArrayType *CATy =
- dyn_cast<ArrayType>(GEP.getSourceElementType())) {
- // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
- if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
- // -> GEP i8* X, ...
- SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
- GetElementPtrInst *Res = GetElementPtrInst::Create(
- StrippedPtrTy->getElementType(), StrippedPtr, Idx, GEP.getName());
- Res->setIsInBounds(GEP.isInBounds());
- if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
- return Res;
- // Insert Res, and create an addrspacecast.
- // e.g.,
- // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
- // ->
- // %0 = GEP i8 addrspace(1)* X, ...
- // addrspacecast i8 addrspace(1)* %0 to i8*
- return new AddrSpaceCastInst(Builder.Insert(Res), GEP.getType());
- }
-
- if (ArrayType *XATy =
- dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
- // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
- if (CATy->getElementType() == XATy->getElementType()) {
- // -> GEP [10 x i8]* X, i32 0, ...
- // At this point, we know that the cast source type is a pointer
- // to an array of the same type as the destination pointer
- // array. Because the array type is never stepped over (there
- // is a leading zero) we can fold the cast into this GEP.
- if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
- GEP.setOperand(0, StrippedPtr);
- GEP.setSourceElementType(XATy);
- return &GEP;
- }
- // Cannot replace the base pointer directly because StrippedPtr's
- // address space is different. Instead, create a new GEP followed by
- // an addrspacecast.
- // e.g.,
- // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
- // i32 0, ...
- // ->
- // %0 = GEP [10 x i8] addrspace(1)* X, ...
- // addrspacecast i8 addrspace(1)* %0 to i8*
- SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
- Value *NewGEP = GEP.isInBounds()
- ? Builder.CreateInBoundsGEP(
- nullptr, StrippedPtr, Idx, GEP.getName())
- : Builder.CreateGEP(nullptr, StrippedPtr, Idx,
- GEP.getName());
- return new AddrSpaceCastInst(NewGEP, GEP.getType());
- }
- }
- }
- } else if (GEP.getNumOperands() == 2) {
- // Transform things like:
- // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
- // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
- Type *SrcElTy = StrippedPtrTy->getElementType();
- Type *ResElTy = GEP.getSourceElementType();
- if (SrcElTy->isArrayTy() &&
- DL.getTypeAllocSize(SrcElTy->getArrayElementType()) ==
- DL.getTypeAllocSize(ResElTy)) {
- Type *IdxType = DL.getIntPtrType(GEP.getType());
- Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
- Value *NewGEP =
- GEP.isInBounds()
- ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
- GEP.getName())
- : Builder.CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
-
- // V and GEP are both pointer types --> BitCast
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
- GEP.getType());
- }
-
- // Transform things like:
- // %V = mul i64 %N, 4
- // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
- // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
- if (ResElTy->isSized() && SrcElTy->isSized()) {
- // Check that changing the type amounts to dividing the index by a scale
- // factor.
- uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
- uint64_t SrcSize = DL.getTypeAllocSize(SrcElTy);
- if (ResSize && SrcSize % ResSize == 0) {
- Value *Idx = GEP.getOperand(1);
- unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
- uint64_t Scale = SrcSize / ResSize;
-
- // Earlier transforms ensure that the index has type IntPtrType, which
- // considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
- "Index not cast to pointer width?");
-
- bool NSW;
- if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
- // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
- // If the multiplication NewIdx * Scale may overflow then the new
- // GEP may not be "inbounds".
- Value *NewGEP =
- GEP.isInBounds() && NSW
- ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
- GEP.getName())
- : Builder.CreateGEP(nullptr, StrippedPtr, NewIdx,
- GEP.getName());
-
- // The NewGEP must be pointer typed, so must the old one -> BitCast
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
- GEP.getType());
- }
- }
- }
-
- // Similarly, transform things like:
- // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
- // (where tmp = 8*tmp2) into:
- // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (ResElTy->isSized() && SrcElTy->isSized() && SrcElTy->isArrayTy()) {
- // Check that changing to the array element type amounts to dividing the
- // index by a scale factor.
- uint64_t ResSize = DL.getTypeAllocSize(ResElTy);
- uint64_t ArrayEltSize =
- DL.getTypeAllocSize(SrcElTy->getArrayElementType());
- if (ResSize && ArrayEltSize % ResSize == 0) {
- Value *Idx = GEP.getOperand(1);
- unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
- uint64_t Scale = ArrayEltSize / ResSize;
-
- // Earlier transforms ensure that the index has type IntPtrType, which
- // considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == DL.getIntPtrType(GEP.getType()) &&
- "Index not cast to pointer width?");
-
- bool NSW;
- if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) {
- // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
- // If the multiplication NewIdx * Scale may overflow then the new
- // GEP may not be "inbounds".
- Value *Off[2] = {
- Constant::getNullValue(DL.getIntPtrType(GEP.getType())),
- NewIdx};
-
- Value *NewGEP = GEP.isInBounds() && NSW
- ? Builder.CreateInBoundsGEP(
- SrcElTy, StrippedPtr, Off, GEP.getName())
- : Builder.CreateGEP(SrcElTy, StrippedPtr, Off,
- GEP.getName());
- // The NewGEP must be pointer typed, so must the old one -> BitCast
- return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
- GEP.getType());
- }
- }
- }
- }
- }
-
- // addrspacecast between types is canonicalized as a bitcast, then an
- // addrspacecast. To take advantage of the below bitcast + struct GEP, look
- // through the addrspacecast.
- if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(PtrOp)) {
- // X = bitcast A addrspace(1)* to B addrspace(1)*
- // Y = addrspacecast A addrspace(1)* to B addrspace(2)*
- // Z = gep Y, <...constant indices...>
- // Into an addrspacecasted GEP of the struct.
- if (BitCastInst *BC = dyn_cast<BitCastInst>(ASC->getOperand(0)))
- PtrOp = BC;
- }
-
- /// See if we can simplify:
- /// X = bitcast A* to B*
- /// Y = gep X, <...constant indices...>
- /// into a gep of the original struct. This is important for SROA and alias
- /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
- Value *Operand = BCI->getOperand(0);
- PointerType *OpType = cast<PointerType>(Operand->getType());
- unsigned OffsetBits = DL.getPointerTypeSizeInBits(GEP.getType());
- APInt Offset(OffsetBits, 0);
- if (!isa<BitCastInst>(Operand) &&
- GEP.accumulateConstantOffset(DL, Offset)) {
-
- // If this GEP instruction doesn't move the pointer, just replace the GEP
- // with a bitcast of the real input to the dest type.
- if (!Offset) {
- // If the bitcast is of an allocation, and the allocation will be
- // converted to match the type of the cast, don't touch this.
- if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, &TLI)) {
- // See if the bitcast simplifies, if so, don't nuke this GEP yet.
- if (Instruction *I = visitBitCast(*BCI)) {
- if (I != BCI) {
- I->takeName(BCI);
- BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
- replaceInstUsesWith(*BCI, I);
- }
- return &GEP;
- }
- }
-
- if (Operand->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(Operand, GEP.getType());
- return new BitCastInst(Operand, GEP.getType());
- }
-
- // Otherwise, if the offset is non-zero, we need to find out if there is a
- // field at Offset in 'A's type. If so, we can pull the cast through the
- // GEP.
- SmallVector<Value*, 8> NewIndices;
- if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
- Value *NGEP =
- GEP.isInBounds()
- ? Builder.CreateInBoundsGEP(nullptr, Operand, NewIndices)
- : Builder.CreateGEP(nullptr, Operand, NewIndices);
-
- if (NGEP->getType() == GEP.getType())
- return replaceInstUsesWith(GEP, NGEP);
- NGEP->takeName(&GEP);
-
- if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
- return new AddrSpaceCastInst(NGEP, GEP.getType());
- return new BitCastInst(NGEP, GEP.getType());
- }
- }
- }
-
- if (!GEP.isInBounds()) {
- unsigned PtrWidth =
- DL.getPointerSizeInBits(PtrOp->getType()->getPointerAddressSpace());
- APInt BasePtrOffset(PtrWidth, 0);
- Value *UnderlyingPtrOp =
- PtrOp->stripAndAccumulateInBoundsConstantOffsets(DL,
- BasePtrOffset);
- if (auto *AI = dyn_cast<AllocaInst>(UnderlyingPtrOp)) {
- if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
- BasePtrOffset.isNonNegative()) {
- APInt AllocSize(PtrWidth, DL.getTypeAllocSize(AI->getAllocatedType()));
- if (BasePtrOffset.ule(AllocSize)) {
- return GetElementPtrInst::CreateInBounds(
- PtrOp, makeArrayRef(Ops).slice(1), GEP.getName());
- }
- }
- }
- }
-
- return nullptr;
-}
-
-static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo *TLI,
- Instruction *AI) {
- if (isa<ConstantPointerNull>(V))
- return true;
- if (auto *LI = dyn_cast<LoadInst>(V))
- return isa<GlobalVariable>(LI->getPointerOperand());
- // Two distinct allocations will never be equal.
- // We rely on LookThroughBitCast in isAllocLikeFn being false, since looking
- // through bitcasts of V can cause
- // the result statement below to be true, even when AI and V (ex:
- // i8* ->i32* ->i8* of AI) are the same allocations.
- return isAllocLikeFn(V, TLI) && V != AI;
-}
-
-static bool isAllocSiteRemovable(Instruction *AI,
- SmallVectorImpl<WeakTrackingVH> &Users,
- const TargetLibraryInfo *TLI) {
- SmallVector<Instruction*, 4> Worklist;
- Worklist.push_back(AI);
-
- do {
- Instruction *PI = Worklist.pop_back_val();
- for (User *U : PI->users()) {
- Instruction *I = cast<Instruction>(U);
- switch (I->getOpcode()) {
- default:
- // Give up the moment we see something we can't handle.
- return false;
-
- case Instruction::AddrSpaceCast:
- case Instruction::BitCast:
- case Instruction::GetElementPtr:
- Users.emplace_back(I);
- Worklist.push_back(I);
- continue;
-
- case Instruction::ICmp: {
- ICmpInst *ICI = cast<ICmpInst>(I);
- // We can fold eq/ne comparisons with null to false/true, respectively.
- // We also fold comparisons in some conditions provided the alloc has
- // not escaped (see isNeverEqualToUnescapedAlloc).
- if (!ICI->isEquality())
- return false;
- unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
- if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
- return false;
- Users.emplace_back(I);
- continue;
- }
-
- case Instruction::Call:
- // Ignore no-op and store intrinsics.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- switch (II->getIntrinsicID()) {
- default:
- return false;
-
- case Intrinsic::memmove:
- case Intrinsic::memcpy:
- case Intrinsic::memset: {
- MemIntrinsic *MI = cast<MemIntrinsic>(II);
- if (MI->isVolatile() || MI->getRawDest() != PI)
- return false;
- LLVM_FALLTHROUGH;
- }
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::objectsize:
- Users.emplace_back(I);
- continue;
- }
- }
-
- if (isFreeCall(I, TLI)) {
- Users.emplace_back(I);
- continue;
- }
- return false;
-
- case Instruction::Store: {
- StoreInst *SI = cast<StoreInst>(I);
- if (SI->isVolatile() || SI->getPointerOperand() != PI)
- return false;
- Users.emplace_back(I);
- continue;
- }
- }
- llvm_unreachable("missing a return?");
- }
- } while (!Worklist.empty());
- return true;
-}
-
-Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
- // If we have a malloc call which is only used in any amount of comparisons
- // to null and free calls, delete the calls and replace the comparisons with
- // true or false as appropriate.
- SmallVector<WeakTrackingVH, 64> Users;
-
- // If we are removing a local variable, insert dbg.value calls before each
- // store.
- DbgDeclareInst *DDI = nullptr;
- std::unique_ptr<DIBuilder> DIB;
- if (isa<AllocaInst>(MI)) {
- DDI = FindAllocaDbgDeclare(&MI);
- DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
- }
-
- if (isAllocSiteRemovable(&MI, Users, &TLI)) {
- for (unsigned i = 0, e = Users.size(); i != e; ++i) {
- // Lowering all @llvm.objectsize calls first because they may
- // use a bitcast/GEP of the alloca we are removing.
- if (!Users[i])
- continue;
-
- Instruction *I = cast<Instruction>(&*Users[i]);
-
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- if (II->getIntrinsicID() == Intrinsic::objectsize) {
- ConstantInt *Result = lowerObjectSizeCall(II, DL, &TLI,
- /*MustSucceed=*/true);
- replaceInstUsesWith(*I, Result);
- eraseInstFromFunction(*I);
- Users[i] = nullptr; // Skip examining in the next loop.
- }
- }
- }
- for (unsigned i = 0, e = Users.size(); i != e; ++i) {
- if (!Users[i])
- continue;
-
- Instruction *I = cast<Instruction>(&*Users[i]);
-
- if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
- replaceInstUsesWith(*C,
- ConstantInt::get(Type::getInt1Ty(C->getContext()),
- C->isFalseWhenEqual()));
- } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I) ||
- isa<AddrSpaceCastInst>(I)) {
- replaceInstUsesWith(*I, UndefValue::get(I->getType()));
- } else if (DDI && isa<StoreInst>(I)) {
- ConvertDebugDeclareToDebugValue(DDI, cast<StoreInst>(I), *DIB);
- }
- eraseInstFromFunction(*I);
- }
-
- if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {
- // Replace invoke with a NOP intrinsic to maintain the original CFG
- Module *M = II->getModule();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::donothing);
- InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
- None, "", II->getParent());
- }
-
- if (DDI)
- eraseInstFromFunction(*DDI);
-
- return eraseInstFromFunction(MI);
- }
- return nullptr;
-}
-
-/// \brief Move the call to free before a NULL test.
-///
-/// Check if this free is accessed after its argument has been test
-/// against NULL (property 0).
-/// If yes, it is legal to move this call in its predecessor block.
-///
-/// The move is performed only if the block containing the call to free
-/// will be removed, i.e.:
-/// 1. it has only one predecessor P, and P has two successors
-/// 2. it contains the call and an unconditional branch
-/// 3. its successor is the same as its predecessor's successor
-///
-/// The profitability is out-of concern here and this function should
-/// be called only if the caller knows this transformation would be
-/// profitable (e.g., for code size).
-static Instruction *
-tryToMoveFreeBeforeNullTest(CallInst &FI) {
- Value *Op = FI.getArgOperand(0);
- BasicBlock *FreeInstrBB = FI.getParent();
- BasicBlock *PredBB = FreeInstrBB->getSinglePredecessor();
-
- // Validate part of constraint #1: Only one predecessor
- // FIXME: We can extend the number of predecessor, but in that case, we
- // would duplicate the call to free in each predecessor and it may
- // not be profitable even for code size.
- if (!PredBB)
- return nullptr;
-
- // Validate constraint #2: Does this block contains only the call to
- // free and an unconditional branch?
- // FIXME: We could check if we can speculate everything in the
- // predecessor block
- if (FreeInstrBB->size() != 2)
- return nullptr;
- BasicBlock *SuccBB;
- if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
- return nullptr;
-
- // Validate the rest of constraint #1 by matching on the pred branch.
- TerminatorInst *TI = PredBB->getTerminator();
- BasicBlock *TrueBB, *FalseBB;
- ICmpInst::Predicate Pred;
- if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
- return nullptr;
- if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
- return nullptr;
-
- // Validate constraint #3: Ensure the null case just falls through.
- if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
- return nullptr;
- assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
- "Broken CFG: missing edge from predecessor to successor");
-
- FI.moveBefore(TI);
- return &FI;
-}
-
-
-Instruction *InstCombiner::visitFree(CallInst &FI) {
- Value *Op = FI.getArgOperand(0);
-
- // free undef -> unreachable.
- if (isa<UndefValue>(Op)) {
- // Insert a new store to null because we cannot modify the CFG here.
- Builder.CreateStore(ConstantInt::getTrue(FI.getContext()),
- UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
- return eraseInstFromFunction(FI);
- }
-
- // If we have 'free null' delete the instruction. This can happen in stl code
- // when lots of inlining happens.
- if (isa<ConstantPointerNull>(Op))
- return eraseInstFromFunction(FI);
-
- // If we optimize for code size, try to move the call to free before the null
- // test so that simplify cfg can remove the empty block and dead code
- // elimination the branch. I.e., helps to turn something like:
- // if (foo) free(foo);
- // into
- // free(foo);
- if (MinimizeSize)
- if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
- return I;
-
- return nullptr;
-}
-
-Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
- if (RI.getNumOperands() == 0) // ret void
- return nullptr;
-
- Value *ResultOp = RI.getOperand(0);
- Type *VTy = ResultOp->getType();
- if (!VTy->isIntegerTy())
- return nullptr;
-
- // There might be assume intrinsics dominating this return that completely
- // determine the value. If so, constant fold it.
- KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
- if (Known.isConstant())
- RI.setOperand(0, Constant::getIntegerValue(VTy, Known.getConstant()));
-
- return nullptr;
-}
-
-Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
- // Change br (not X), label True, label False to: br X, label False, True
- Value *X = nullptr;
- BasicBlock *TrueDest;
- BasicBlock *FalseDest;
- if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
- !isa<Constant>(X)) {
- // Swap Destinations and condition...
- BI.setCondition(X);
- BI.swapSuccessors();
- return &BI;
- }
-
- // If the condition is irrelevant, remove the use so that other
- // transforms on the condition become more effective.
- if (BI.isConditional() &&
- BI.getSuccessor(0) == BI.getSuccessor(1) &&
- !isa<UndefValue>(BI.getCondition())) {
- BI.setCondition(UndefValue::get(BI.getCondition()->getType()));
- return &BI;
- }
-
- // Canonicalize, for example, icmp_ne -> icmp_eq or fcmp_one -> fcmp_oeq.
- CmpInst::Predicate Pred;
- if (match(&BI, m_Br(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), TrueDest,
- FalseDest)) &&
- !isCanonicalPredicate(Pred)) {
- // Swap destinations and condition.
- CmpInst *Cond = cast<CmpInst>(BI.getCondition());
- Cond->setPredicate(CmpInst::getInversePredicate(Pred));
- BI.swapSuccessors();
- Worklist.Add(Cond);
- return &BI;
- }
-
- return nullptr;
-}
-
-Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
- Value *Cond = SI.getCondition();
- Value *Op0;
- ConstantInt *AddRHS;
- if (match(Cond, m_Add(m_Value(Op0), m_ConstantInt(AddRHS)))) {
- // Change 'switch (X+4) case 1:' into 'switch (X) case -3'.
- for (auto Case : SI.cases()) {
- Constant *NewCase = ConstantExpr::getSub(Case.getCaseValue(), AddRHS);
- assert(isa<ConstantInt>(NewCase) &&
- "Result of expression should be constant");
- Case.setValue(cast<ConstantInt>(NewCase));
- }
- SI.setCondition(Op0);
- return &SI;
- }
-
- KnownBits Known = computeKnownBits(Cond, 0, &SI);
- unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
- unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
-
- // Compute the number of leading bits we can ignore.
- // TODO: A better way to determine this would use ComputeNumSignBits().
- for (auto &C : SI.cases()) {
- LeadingKnownZeros = std::min(
- LeadingKnownZeros, C.getCaseValue()->getValue().countLeadingZeros());
- LeadingKnownOnes = std::min(
- LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
- }
-
- unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
-
- // Shrink the condition operand if the new type is smaller than the old type.
- // This may produce a non-standard type for the switch, but that's ok because
- // the backend should extend back to a legal type for the target.
- if (NewWidth > 0 && NewWidth < Known.getBitWidth()) {
- IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
- Builder.SetInsertPoint(&SI);
- Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
- SI.setCondition(NewCond);
-
- for (auto Case : SI.cases()) {
- APInt TruncatedCase = Case.getCaseValue()->getValue().trunc(NewWidth);
- Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
- }
- return &SI;
- }
-
- return nullptr;
-}
-
-Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
- Value *Agg = EV.getAggregateOperand();
-
- if (!EV.hasIndices())
- return replaceInstUsesWith(EV, Agg);
-
- if (Value *V = SimplifyExtractValueInst(Agg, EV.getIndices(),
- SQ.getWithInstruction(&EV)))
- return replaceInstUsesWith(EV, V);
-
- if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
- // We're extracting from an insertvalue instruction, compare the indices
- const unsigned *exti, *exte, *insi, *inse;
- for (exti = EV.idx_begin(), insi = IV->idx_begin(),
- exte = EV.idx_end(), inse = IV->idx_end();
- exti != exte && insi != inse;
- ++exti, ++insi) {
- if (*insi != *exti)
- // The insert and extract both reference distinctly different elements.
- // This means the extract is not influenced by the insert, and we can
- // replace the aggregate operand of the extract with the aggregate
- // operand of the insert. i.e., replace
- // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
- // %E = extractvalue { i32, { i32 } } %I, 0
- // with
- // %E = extractvalue { i32, { i32 } } %A, 0
- return ExtractValueInst::Create(IV->getAggregateOperand(),
- EV.getIndices());
- }
- if (exti == exte && insi == inse)
- // Both iterators are at the end: Index lists are identical. Replace
- // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
- // %C = extractvalue { i32, { i32 } } %B, 1, 0
- // with "i32 42"
- return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
- if (exti == exte) {
- // The extract list is a prefix of the insert list. i.e. replace
- // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
- // %E = extractvalue { i32, { i32 } } %I, 1
- // with
- // %X = extractvalue { i32, { i32 } } %A, 1
- // %E = insertvalue { i32 } %X, i32 42, 0
- // by switching the order of the insert and extract (though the
- // insertvalue should be left in, since it may have other uses).
- Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
- EV.getIndices());
- return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
- makeArrayRef(insi, inse));
- }
- if (insi == inse)
- // The insert list is a prefix of the extract list
- // We can simply remove the common indices from the extract and make it
- // operate on the inserted value instead of the insertvalue result.
- // i.e., replace
- // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
- // %E = extractvalue { i32, { i32 } } %I, 1, 0
- // with
- // %E extractvalue { i32 } { i32 42 }, 0
- return ExtractValueInst::Create(IV->getInsertedValueOperand(),
- makeArrayRef(exti, exte));
- }
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
- // We're extracting from an intrinsic, see if we're the only user, which
- // allows us to simplify multiple result intrinsics to simpler things that
- // just get one value.
- if (II->hasOneUse()) {
- // Check if we're grabbing the overflow bit or the result of a 'with
- // overflow' intrinsic. If it's the latter we can remove the intrinsic
- // and replace it with a traditional binary instruction.
- switch (II->getIntrinsicID()) {
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
- replaceInstUsesWith(*II, UndefValue::get(II->getType()));
- eraseInstFromFunction(*II);
- return BinaryOperator::CreateAdd(LHS, RHS);
- }
-
- // If the normal result of the add is dead, and the RHS is a constant,
- // we can transform this into a range comparison.
- // overflow = uadd a, -4 --> overflow = icmp ugt a, 3
- if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
- return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
- ConstantExpr::getNot(CI));
- break;
- case Intrinsic::usub_with_overflow:
- case Intrinsic::ssub_with_overflow:
- if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
- replaceInstUsesWith(*II, UndefValue::get(II->getType()));
- eraseInstFromFunction(*II);
- return BinaryOperator::CreateSub(LHS, RHS);
- }
- break;
- case Intrinsic::umul_with_overflow:
- case Intrinsic::smul_with_overflow:
- if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
- replaceInstUsesWith(*II, UndefValue::get(II->getType()));
- eraseInstFromFunction(*II);
- return BinaryOperator::CreateMul(LHS, RHS);
- }
- break;
- default:
- break;
- }
- }
- }
- if (LoadInst *L = dyn_cast<LoadInst>(Agg))
- // If the (non-volatile) load only has one use, we can rewrite this to a
- // load from a GEP. This reduces the size of the load. If a load is used
- // only by extractvalue instructions then this either must have been
- // optimized before, or it is a struct with padding, in which case we
- // don't want to do the transformation as it loses padding knowledge.
- if (L->isSimple() && L->hasOneUse()) {
- // extractvalue has integer indices, getelementptr has Value*s. Convert.
- SmallVector<Value*, 4> Indices;
- // Prefix an i32 0 since we need the first element.
- Indices.push_back(Builder.getInt32(0));
- for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
- I != E; ++I)
- Indices.push_back(Builder.getInt32(*I));
-
- // We need to insert these at the location of the old load, not at that of
- // the extractvalue.
- Builder.SetInsertPoint(L);
- Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
- L->getPointerOperand(), Indices);
- Instruction *NL = Builder.CreateLoad(GEP);
- // Whatever aliasing information we had for the orignal load must also
- // hold for the smaller load, so propagate the annotations.
- AAMDNodes Nodes;
- L->getAAMetadata(Nodes);
- NL->setAAMetadata(Nodes);
- // Returning the load directly will cause the main loop to insert it in
- // the wrong spot, so use replaceInstUsesWith().
- return replaceInstUsesWith(EV, NL);
- }
- // We could simplify extracts from other values. Note that nested extracts may
- // already be simplified implicitly by the above: extract (extract (insert) )
- // will be translated into extract ( insert ( extract ) ) first and then just
- // the value inserted, if appropriate. Similarly for extracts from single-use
- // loads: extract (extract (load)) will be translated to extract (load (gep))
- // and if again single-use then via load (gep (gep)) to load (gep).
- // However, double extracts from e.g. function arguments or return values
- // aren't handled yet.
- return nullptr;
-}
-
-/// Return 'true' if the given typeinfo will match anything.
-static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {
- switch (Personality) {
- case EHPersonality::GNU_C:
- case EHPersonality::GNU_C_SjLj:
- case EHPersonality::Rust:
- // The GCC C EH and Rust personality only exists to support cleanups, so
- // it's not clear what the semantics of catch clauses are.
- return false;
- case EHPersonality::Unknown:
- return false;
- case EHPersonality::GNU_Ada:
- // While __gnat_all_others_value will match any Ada exception, it doesn't
- // match foreign exceptions (or didn't, before gcc-4.7).
- return false;
- case EHPersonality::GNU_CXX:
- case EHPersonality::GNU_CXX_SjLj:
- case EHPersonality::GNU_ObjC:
- case EHPersonality::MSVC_X86SEH:
- case EHPersonality::MSVC_Win64SEH:
- case EHPersonality::MSVC_CXX:
- case EHPersonality::CoreCLR:
- return TypeInfo->isNullValue();
- }
- llvm_unreachable("invalid enum");
-}
-
-static bool shorter_filter(const Value *LHS, const Value *RHS) {
- return
- cast<ArrayType>(LHS->getType())->getNumElements()
- <
- cast<ArrayType>(RHS->getType())->getNumElements();
-}
-
-Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
- // The logic here should be correct for any real-world personality function.
- // However if that turns out not to be true, the offending logic can always
- // be conditioned on the personality function, like the catch-all logic is.
- EHPersonality Personality =
- classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
-
- // Simplify the list of clauses, eg by removing repeated catch clauses
- // (these are often created by inlining).
- bool MakeNewInstruction = false; // If true, recreate using the following:
- SmallVector<Constant *, 16> NewClauses; // - Clauses for the new instruction;
- bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup.
-
- SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
- for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
- bool isLastClause = i + 1 == e;
- if (LI.isCatch(i)) {
- // A catch clause.
- Constant *CatchClause = LI.getClause(i);
- Constant *TypeInfo = CatchClause->stripPointerCasts();
-
- // If we already saw this clause, there is no point in having a second
- // copy of it.
- if (AlreadyCaught.insert(TypeInfo).second) {
- // This catch clause was not already seen.
- NewClauses.push_back(CatchClause);
- } else {
- // Repeated catch clause - drop the redundant copy.
- MakeNewInstruction = true;
- }
-
- // If this is a catch-all then there is no point in keeping any following
- // clauses or marking the landingpad as having a cleanup.
- if (isCatchAll(Personality, TypeInfo)) {
- if (!isLastClause)
- MakeNewInstruction = true;
- CleanupFlag = false;
- break;
- }
- } else {
- // A filter clause. If any of the filter elements were already caught
- // then they can be dropped from the filter. It is tempting to try to
- // exploit the filter further by saying that any typeinfo that does not
- // occur in the filter can't be caught later (and thus can be dropped).
- // However this would be wrong, since typeinfos can match without being
- // equal (for example if one represents a C++ class, and the other some
- // class derived from it).
- assert(LI.isFilter(i) && "Unsupported landingpad clause!");
- Constant *FilterClause = LI.getClause(i);
- ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
- unsigned NumTypeInfos = FilterType->getNumElements();
-
- // An empty filter catches everything, so there is no point in keeping any
- // following clauses or marking the landingpad as having a cleanup. By
- // dealing with this case here the following code is made a bit simpler.
- if (!NumTypeInfos) {
- NewClauses.push_back(FilterClause);
- if (!isLastClause)
- MakeNewInstruction = true;
- CleanupFlag = false;
- break;
- }
-
- bool MakeNewFilter = false; // If true, make a new filter.
- SmallVector<Constant *, 16> NewFilterElts; // New elements.
- if (isa<ConstantAggregateZero>(FilterClause)) {
- // Not an empty filter - it contains at least one null typeinfo.
- assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
- Constant *TypeInfo =
- Constant::getNullValue(FilterType->getElementType());
- // If this typeinfo is a catch-all then the filter can never match.
- if (isCatchAll(Personality, TypeInfo)) {
- // Throw the filter away.
- MakeNewInstruction = true;
- continue;
- }
-
- // There is no point in having multiple copies of this typeinfo, so
- // discard all but the first copy if there is more than one.
- NewFilterElts.push_back(TypeInfo);
- if (NumTypeInfos > 1)
- MakeNewFilter = true;
- } else {
- ConstantArray *Filter = cast<ConstantArray>(FilterClause);
- SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
- NewFilterElts.reserve(NumTypeInfos);
-
- // Remove any filter elements that were already caught or that already
- // occurred in the filter. While there, see if any of the elements are
- // catch-alls. If so, the filter can be discarded.
- bool SawCatchAll = false;
- for (unsigned j = 0; j != NumTypeInfos; ++j) {
- Constant *Elt = Filter->getOperand(j);
- Constant *TypeInfo = Elt->stripPointerCasts();
- if (isCatchAll(Personality, TypeInfo)) {
- // This element is a catch-all. Bail out, noting this fact.
- SawCatchAll = true;
- break;
- }
-
- // Even if we've seen a type in a catch clause, we don't want to
- // remove it from the filter. An unexpected type handler may be
- // set up for a call site which throws an exception of the same
- // type caught. In order for the exception thrown by the unexpected
- // handler to propagate correctly, the filter must be correctly
- // described for the call site.
- //
- // Example:
- //
- // void unexpected() { throw 1;}
- // void foo() throw (int) {
- // std::set_unexpected(unexpected);
- // try {
- // throw 2.0;
- // } catch (int i) {}
- // }
-
- // There is no point in having multiple copies of the same typeinfo in
- // a filter, so only add it if we didn't already.
- if (SeenInFilter.insert(TypeInfo).second)
- NewFilterElts.push_back(cast<Constant>(Elt));
- }
- // A filter containing a catch-all cannot match anything by definition.
- if (SawCatchAll) {
- // Throw the filter away.
- MakeNewInstruction = true;
- continue;
- }
-
- // If we dropped something from the filter, make a new one.
- if (NewFilterElts.size() < NumTypeInfos)
- MakeNewFilter = true;
- }
- if (MakeNewFilter) {
- FilterType = ArrayType::get(FilterType->getElementType(),
- NewFilterElts.size());
- FilterClause = ConstantArray::get(FilterType, NewFilterElts);
- MakeNewInstruction = true;
- }
-
- NewClauses.push_back(FilterClause);
-
- // If the new filter is empty then it will catch everything so there is
- // no point in keeping any following clauses or marking the landingpad
- // as having a cleanup. The case of the original filter being empty was
- // already handled above.
- if (MakeNewFilter && !NewFilterElts.size()) {
- assert(MakeNewInstruction && "New filter but not a new instruction!");
- CleanupFlag = false;
- break;
- }
- }
- }
-
- // If several filters occur in a row then reorder them so that the shortest
- // filters come first (those with the smallest number of elements). This is
- // advantageous because shorter filters are more likely to match, speeding up
- // unwinding, but mostly because it increases the effectiveness of the other
- // filter optimizations below.
- for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
- unsigned j;
- // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
- for (j = i; j != e; ++j)
- if (!isa<ArrayType>(NewClauses[j]->getType()))
- break;
-
- // Check whether the filters are already sorted by length. We need to know
- // if sorting them is actually going to do anything so that we only make a
- // new landingpad instruction if it does.
- for (unsigned k = i; k + 1 < j; ++k)
- if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
- // Not sorted, so sort the filters now. Doing an unstable sort would be
- // correct too but reordering filters pointlessly might confuse users.
- std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
- shorter_filter);
- MakeNewInstruction = true;
- break;
- }
-
- // Look for the next batch of filters.
- i = j + 1;
- }
-
- // If typeinfos matched if and only if equal, then the elements of a filter L
- // that occurs later than a filter F could be replaced by the intersection of
- // the elements of F and L. In reality two typeinfos can match without being
- // equal (for example if one represents a C++ class, and the other some class
- // derived from it) so it would be wrong to perform this transform in general.
- // However the transform is correct and useful if F is a subset of L. In that
- // case L can be replaced by F, and thus removed altogether since repeating a
- // filter is pointless. So here we look at all pairs of filters F and L where
- // L follows F in the list of clauses, and remove L if every element of F is
- // an element of L. This can occur when inlining C++ functions with exception
- // specifications.
- for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
- // Examine each filter in turn.
- Value *Filter = NewClauses[i];
- ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
- if (!FTy)
- // Not a filter - skip it.
- continue;
- unsigned FElts = FTy->getNumElements();
- // Examine each filter following this one. Doing this backwards means that
- // we don't have to worry about filters disappearing under us when removed.
- for (unsigned j = NewClauses.size() - 1; j != i; --j) {
- Value *LFilter = NewClauses[j];
- ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
- if (!LTy)
- // Not a filter - skip it.
- continue;
- // If Filter is a subset of LFilter, i.e. every element of Filter is also
- // an element of LFilter, then discard LFilter.
- SmallVectorImpl<Constant *>::iterator J = NewClauses.begin() + j;
- // If Filter is empty then it is a subset of LFilter.
- if (!FElts) {
- // Discard LFilter.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- // Move on to the next filter.
- continue;
- }
- unsigned LElts = LTy->getNumElements();
- // If Filter is longer than LFilter then it cannot be a subset of it.
- if (FElts > LElts)
- // Move on to the next filter.
- continue;
- // At this point we know that LFilter has at least one element.
- if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
- // Filter is a subset of LFilter iff Filter contains only zeros (as we
- // already know that Filter is not longer than LFilter).
- if (isa<ConstantAggregateZero>(Filter)) {
- assert(FElts <= LElts && "Should have handled this case earlier!");
- // Discard LFilter.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- }
- // Move on to the next filter.
- continue;
- }
- ConstantArray *LArray = cast<ConstantArray>(LFilter);
- if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
- // Since Filter is non-empty and contains only zeros, it is a subset of
- // LFilter iff LFilter contains a zero.
- assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
- for (unsigned l = 0; l != LElts; ++l)
- if (LArray->getOperand(l)->isNullValue()) {
- // LFilter contains a zero - discard it.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- break;
- }
- // Move on to the next filter.
- continue;
- }
- // At this point we know that both filters are ConstantArrays. Loop over
- // operands to see whether every element of Filter is also an element of
- // LFilter. Since filters tend to be short this is probably faster than
- // using a method that scales nicely.
- ConstantArray *FArray = cast<ConstantArray>(Filter);
- bool AllFound = true;
- for (unsigned f = 0; f != FElts; ++f) {
- Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
- AllFound = false;
- for (unsigned l = 0; l != LElts; ++l) {
- Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
- if (LTypeInfo == FTypeInfo) {
- AllFound = true;
- break;
- }
- }
- if (!AllFound)
- break;
- }
- if (AllFound) {
- // Discard LFilter.
- NewClauses.erase(J);
- MakeNewInstruction = true;
- }
- // Move on to the next filter.
- }
- }
-
- // If we changed any of the clauses, replace the old landingpad instruction
- // with a new one.
- if (MakeNewInstruction) {
- LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
- NewClauses.size());
- for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
- NLI->addClause(NewClauses[i]);
- // A landing pad with no clauses must have the cleanup flag set. It is
- // theoretically possible, though highly unlikely, that we eliminated all
- // clauses. If so, force the cleanup flag to true.
- if (NewClauses.empty())
- CleanupFlag = true;
- NLI->setCleanup(CleanupFlag);
- return NLI;
- }
-
- // Even if none of the clauses changed, we may nonetheless have understood
- // that the cleanup flag is pointless. Clear it if so.
- if (LI.isCleanup() != CleanupFlag) {
- assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
- LI.setCleanup(CleanupFlag);
- return &LI;
- }
-
- return nullptr;
-}
-
-/// Try to move the specified instruction from its current block into the
-/// beginning of DestBlock, which can only happen if it's safe to move the
-/// instruction past all of the instructions between it and the end of its
-/// block.
-static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
- assert(I->hasOneUse() && "Invariants didn't hold!");
-
- // Cannot move control-flow-involving, volatile loads, vaarg, etc.
- if (isa<PHINode>(I) || I->isEHPad() || I->mayHaveSideEffects() ||
- isa<TerminatorInst>(I))
- return false;
-
- // Do not sink alloca instructions out of the entry block.
- if (isa<AllocaInst>(I) && I->getParent() ==
- &DestBlock->getParent()->getEntryBlock())
- return false;
-
- // Do not sink into catchswitch blocks.
- if (isa<CatchSwitchInst>(DestBlock->getTerminator()))
- return false;
-
- // Do not sink convergent call instructions.
- if (auto *CI = dyn_cast<CallInst>(I)) {
- if (CI->isConvergent())
- return false;
- }
- // We can only sink load instructions if there is nothing between the load and
- // the end of block that could change the value.
- if (I->mayReadFromMemory()) {
- for (BasicBlock::iterator Scan = I->getIterator(),
- E = I->getParent()->end();
- Scan != E; ++Scan)
- if (Scan->mayWriteToMemory())
- return false;
- }
-
- BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
- I->moveBefore(&*InsertPos);
- ++NumSunkInst;
- return true;
-}
-
-bool InstCombiner::run() {
- while (!Worklist.isEmpty()) {
- Instruction *I = Worklist.RemoveOne();
- if (I == nullptr) continue; // skip null values.
-
- // Check to see if we can DCE the instruction.
- if (isInstructionTriviallyDead(I, &TLI)) {
- DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
- eraseInstFromFunction(*I);
- ++NumDeadInst;
- MadeIRChange = true;
- continue;
- }
-
- if (!DebugCounter::shouldExecute(VisitCounter))
- continue;
-
- // Instruction isn't dead, see if we can constant propagate it.
- if (!I->use_empty() &&
- (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
- if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
- DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
-
- // Add operands to the worklist.
- replaceInstUsesWith(*I, C);
- ++NumConstProp;
- if (isInstructionTriviallyDead(I, &TLI))
- eraseInstFromFunction(*I);
- MadeIRChange = true;
- continue;
- }
- }
-
- // In general, it is possible for computeKnownBits to determine all bits in
- // a value even when the operands are not all constants.
- Type *Ty = I->getType();
- if (ExpensiveCombines && !I->use_empty() && Ty->isIntOrIntVectorTy()) {
- KnownBits Known = computeKnownBits(I, /*Depth*/0, I);
- if (Known.isConstant()) {
- Constant *C = ConstantInt::get(Ty, Known.getConstant());
- DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
- " from: " << *I << '\n');
-
- // Add operands to the worklist.
- replaceInstUsesWith(*I, C);
- ++NumConstProp;
- if (isInstructionTriviallyDead(I, &TLI))
- eraseInstFromFunction(*I);
- MadeIRChange = true;
- continue;
- }
- }
-
- // See if we can trivially sink this instruction to a successor basic block.
- if (I->hasOneUse()) {
- BasicBlock *BB = I->getParent();
- Instruction *UserInst = cast<Instruction>(*I->user_begin());
- BasicBlock *UserParent;
-
- // Get the block the use occurs in.
- if (PHINode *PN = dyn_cast<PHINode>(UserInst))
- UserParent = PN->getIncomingBlock(*I->use_begin());
- else
- UserParent = UserInst->getParent();
-
- if (UserParent != BB) {
- bool UserIsSuccessor = false;
- // See if the user is one of our successors.
- for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
- if (*SI == UserParent) {
- UserIsSuccessor = true;
- break;
- }
-
- // If the user is one of our immediate successors, and if that successor
- // only has us as a predecessors (we'd have to split the critical edge
- // otherwise), we can keep going.
- if (UserIsSuccessor && UserParent->getUniquePredecessor()) {
- // Okay, the CFG is simple enough, try to sink this instruction.
- if (TryToSinkInstruction(I, UserParent)) {
- DEBUG(dbgs() << "IC: Sink: " << *I << '\n');
- MadeIRChange = true;
- // We'll add uses of the sunk instruction below, but since sinking
- // can expose opportunities for it's *operands* add them to the
- // worklist
- for (Use &U : I->operands())
- if (Instruction *OpI = dyn_cast<Instruction>(U.get()))
- Worklist.Add(OpI);
- }
- }
- }
- }
-
- // Now that we have an instruction, try combining it to simplify it.
- Builder.SetInsertPoint(I);
- Builder.SetCurrentDebugLocation(I->getDebugLoc());
-
-#ifndef NDEBUG
- std::string OrigI;
-#endif
- DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
- DEBUG(dbgs() << "IC: Visiting: " << OrigI << '\n');
-
- if (Instruction *Result = visit(*I)) {
- ++NumCombined;
- // Should we replace the old instruction with a new one?
- if (Result != I) {
- DEBUG(dbgs() << "IC: Old = " << *I << '\n'
- << " New = " << *Result << '\n');
-
- if (I->getDebugLoc())
- Result->setDebugLoc(I->getDebugLoc());
- // Everything uses the new instruction now.
- I->replaceAllUsesWith(Result);
-
- // Move the name to the new instruction first.
- Result->takeName(I);
-
- // Push the new instruction and any users onto the worklist.
- Worklist.AddUsersToWorkList(*Result);
- Worklist.Add(Result);
-
- // Insert the new instruction into the basic block...
- BasicBlock *InstParent = I->getParent();
- BasicBlock::iterator InsertPos = I->getIterator();
-
- // If we replace a PHI with something that isn't a PHI, fix up the
- // insertion point.
- if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
- InsertPos = InstParent->getFirstInsertionPt();
-
- InstParent->getInstList().insert(InsertPos, Result);
-
- eraseInstFromFunction(*I);
- } else {
- DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'
- << " New = " << *I << '\n');
-
- // If the instruction was modified, it's possible that it is now dead.
- // if so, remove it.
- if (isInstructionTriviallyDead(I, &TLI)) {
- eraseInstFromFunction(*I);
- } else {
- Worklist.AddUsersToWorkList(*I);
- Worklist.Add(I);
- }
- }
- MadeIRChange = true;
- }
- }
-
- Worklist.Zap();
- return MadeIRChange;
-}
-
-/// Walk the function in depth-first order, adding all reachable code to the
-/// worklist.
-///
-/// This has a couple of tricks to make the code faster and more powerful. In
-/// particular, we constant fold and DCE instructions as we go, to avoid adding
-/// them to the worklist (this significantly speeds up instcombine on code where
-/// many instructions are dead or constant). Additionally, if we find a branch
-/// whose condition is a known constant, we only visit the reachable successors.
-///
-static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
- SmallPtrSetImpl<BasicBlock *> &Visited,
- InstCombineWorklist &ICWorklist,
- const TargetLibraryInfo *TLI) {
- bool MadeIRChange = false;
- SmallVector<BasicBlock*, 256> Worklist;
- Worklist.push_back(BB);
-
- SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
- DenseMap<Constant *, Constant *> FoldedConstants;
-
- do {
- BB = Worklist.pop_back_val();
-
- // We have now visited this block! If we've already been here, ignore it.
- if (!Visited.insert(BB).second)
- continue;
-
- for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
- Instruction *Inst = &*BBI++;
-
- // DCE instruction if trivially dead.
- if (isInstructionTriviallyDead(Inst, TLI)) {
- ++NumDeadInst;
- DEBUG(dbgs() << "IC: DCE: " << *Inst << '\n');
- Inst->eraseFromParent();
- MadeIRChange = true;
- continue;
- }
-
- // ConstantProp instruction if trivially constant.
- if (!Inst->use_empty() &&
- (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0))))
- if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
- DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
- << *Inst << '\n');
- Inst->replaceAllUsesWith(C);
- ++NumConstProp;
- if (isInstructionTriviallyDead(Inst, TLI))
- Inst->eraseFromParent();
- MadeIRChange = true;
- continue;
- }
-
- // See if we can constant fold its operands.
- for (Use &U : Inst->operands()) {
- if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
- continue;
-
- auto *C = cast<Constant>(U);
- Constant *&FoldRes = FoldedConstants[C];
- if (!FoldRes)
- FoldRes = ConstantFoldConstant(C, DL, TLI);
- if (!FoldRes)
- FoldRes = C;
-
- if (FoldRes != C) {
- DEBUG(dbgs() << "IC: ConstFold operand of: " << *Inst
- << "\n Old = " << *C
- << "\n New = " << *FoldRes << '\n');
- U = FoldRes;
- MadeIRChange = true;
- }
- }
-
- // Skip processing debug intrinsics in InstCombine. Processing these call instructions
- // consumes non-trivial amount of time and provides no value for the optimization.
- if (!isa<DbgInfoIntrinsic>(Inst))
- InstrsForInstCombineWorklist.push_back(Inst);
- }
-
- // Recursively visit successors. If this is a branch or switch on a
- // constant, only visit the reachable successor.
- TerminatorInst *TI = BB->getTerminator();
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
- bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
- BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
- Worklist.push_back(ReachableBB);
- continue;
- }
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
- Worklist.push_back(SI->findCaseValue(Cond)->getCaseSuccessor());
- continue;
- }
- }
-
- for (BasicBlock *SuccBB : TI->successors())
- Worklist.push_back(SuccBB);
- } while (!Worklist.empty());
-
- // Once we've found all of the instructions to add to instcombine's worklist,
- // add them in reverse order. This way instcombine will visit from the top
- // of the function down. This jives well with the way that it adds all uses
- // of instructions to the worklist after doing a transformation, thus avoiding
- // some N^2 behavior in pathological cases.
- ICWorklist.AddInitialGroup(InstrsForInstCombineWorklist);
-
- return MadeIRChange;
-}
-
-/// \brief Populate the IC worklist from a function, and prune any dead basic
-/// blocks discovered in the process.
-///
-/// This also does basic constant propagation and other forward fixing to make
-/// the combiner itself run much faster.
-static bool prepareICWorklistFromFunction(Function &F, const DataLayout &DL,
- TargetLibraryInfo *TLI,
- InstCombineWorklist &ICWorklist) {
- bool MadeIRChange = false;
-
- // Do a depth-first traversal of the function, populate the worklist with
- // the reachable instructions. Ignore blocks that are not reachable. Keep
- // track of which blocks we visit.
- SmallPtrSet<BasicBlock *, 32> Visited;
- MadeIRChange |=
- AddReachableCodeToWorklist(&F.front(), DL, Visited, ICWorklist, TLI);
-
- // Do a quick scan over the function. If we find any blocks that are
- // unreachable, remove any instructions inside of them. This prevents
- // the instcombine code from having to deal with some bad special cases.
- for (BasicBlock &BB : F) {
- if (Visited.count(&BB))
- continue;
-
- unsigned NumDeadInstInBB = removeAllNonTerminatorAndEHPadInstructions(&BB);
- MadeIRChange |= NumDeadInstInBB > 0;
- NumDeadInst += NumDeadInstInBB;
- }
-
- return MadeIRChange;
-}
-
-static bool combineInstructionsOverFunction(
- Function &F, InstCombineWorklist &Worklist, AliasAnalysis *AA,
- AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
- OptimizationRemarkEmitter &ORE, bool ExpensiveCombines = true,
- LoopInfo *LI = nullptr) {
- auto &DL = F.getParent()->getDataLayout();
- ExpensiveCombines |= EnableExpensiveCombines;
-
- /// Builder - This is an IRBuilder that automatically inserts new
- /// instructions into the worklist when they are created.
- IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
- F.getContext(), TargetFolder(DL),
- IRBuilderCallbackInserter([&Worklist, &AC](Instruction *I) {
- Worklist.Add(I);
-
- using namespace llvm::PatternMatch;
- if (match(I, m_Intrinsic<Intrinsic::assume>()))
- AC.registerAssumption(cast<CallInst>(I));
- }));
-
- // Iterate while there is work to do.
- int Iteration = 0;
- bool MadeIRChange = false;
- for (;;) {
- ++Iteration;
- DEBUG(dbgs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
- << F.getName() << "\n");
-
- MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
-
- InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines, AA,
- AC, TLI, DT, ORE, DL, LI);
- IC.MaxArraySizeForCombine = MaxArraySize;
-
- if (!IC.run())
- break;
- }
-
- return MadeIRChange || Iteration > 1;
-}
-
-PreservedAnalyses InstCombinePass::run(Function &F,
- FunctionAnalysisManager &AM) {
- auto &AC = AM.getResult<AssumptionAnalysis>(F);
- auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
- auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
- auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
-
- auto *LI = AM.getCachedResult<LoopAnalysis>(F);
-
- // FIXME: The AliasAnalysis is not yet supported in the new pass manager
- if (!combineInstructionsOverFunction(F, Worklist, nullptr, AC, TLI, DT, ORE,
- ExpensiveCombines, LI))
- // No changes, all analyses are preserved.
- return PreservedAnalyses::all();
-
- // Mark all the analyses that instcombine updates as preserved.
- PreservedAnalyses PA;
- PA.preserveSet<CFGAnalyses>();
- PA.preserve<AAManager>();
- PA.preserve<GlobalsAA>();
- return PA;
-}
-
-void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequired<AAResultsWrapperPass>();
- AU.addRequired<AssumptionCacheTracker>();
- AU.addRequired<TargetLibraryInfoWrapperPass>();
- AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addPreserved<AAResultsWrapperPass>();
- AU.addPreserved<BasicAAWrapperPass>();
- AU.addPreserved<GlobalsAAWrapperPass>();
-}
-
-bool InstructionCombiningPass::runOnFunction(Function &F) {
- if (skipFunction(F))
- return false;
-
- // Required analyses.
- auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
- auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
-
- // Optional analyses.
- auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
- auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
-
- return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, DT, ORE,
- ExpensiveCombines, LI);
-}
-
-char InstructionCombiningPass::ID = 0;
-INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
- "Combine redundant instructions", false, false)
-INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
-INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
- "Combine redundant instructions", false, false)
-
-// Initialization Routines
-void llvm::initializeInstCombine(PassRegistry &Registry) {
- initializeInstructionCombiningPassPass(Registry);
-}
-
-void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
- initializeInstructionCombiningPassPass(*unwrap(R));
-}
-
-FunctionPass *llvm::createInstructionCombiningPass(bool ExpensiveCombines) {
- return new InstructionCombiningPass(ExpensiveCombines);
-}
diff --git a/chromium/tools/clang/scripts/package.py b/chromium/tools/clang/scripts/package.py
index 599c51d8180..a557dfcb29d 100755
--- a/chromium/tools/clang/scripts/package.py
+++ b/chromium/tools/clang/scripts/package.py
@@ -166,7 +166,7 @@ def main():
args = parser.parse_args()
# Check that the script is not going to upload a toolchain built from HEAD.
- use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
+ use_head_revision = bool(int(os.environ.get('LLVM_FORCE_HEAD_REVISION', '0')))
if args.upload and use_head_revision:
print ("--upload and LLVM_FORCE_HEAD_REVISION could not be used "
"at the same time.")
@@ -255,10 +255,11 @@ def main():
'lib/libBlinkGCPlugin.' + so_ext,
])
if sys.platform == 'darwin':
- want.extend([# Copy only the OSX and iossim (ASan and profile) runtime
- # libraries:
+ want.extend([# Copy only the OSX and iossim (ASan, fuzzer and profile)
+ # runtime libraries:
'lib/clang/*/lib/darwin/*asan_osx*',
'lib/clang/*/lib/darwin/*asan_iossim*',
+ 'lib/clang/*/lib/darwin/*fuzzer*',
'lib/clang/*/lib/darwin/*profile_osx*',
'lib/clang/*/lib/darwin/*profile_iossim*',
# And the OSX and ios builtin libraries (iossim is lipo'd into
@@ -271,10 +272,11 @@ def main():
want.append('bin/llvm-ar')
want.append('bin/lld')
# Copy only
- # lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,profile}-*.a ,
+ # lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,fuzzer,profile}-*.a,
# but not dfsan.
want.extend(['lib/clang/*/lib/linux/*[atm]san*',
'lib/clang/*/lib/linux/*ubsan*',
+ 'lib/clang/*/lib/linux/*libclang_rt.fuzzer*',
'lib/clang/*/lib/linux/*libclang_rt.san*',
'lib/clang/*/lib/linux/*profile*',
'lib/clang/*/msan_blacklist.txt',
@@ -331,6 +333,18 @@ def main():
MaybeUpload(args, pdir, platform)
+ # Zip up llvm-code-coverage for code coverage.
+ code_coverage_dir = 'llvm-code-coverage-' + stamp
+ shutil.rmtree(code_coverage_dir, ignore_errors=True)
+ os.makedirs(os.path.join(code_coverage_dir, 'bin'))
+ for filename in ['llvm-cov', 'llvm-profdata']:
+ shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', filename + exe_ext),
+ os.path.join(code_coverage_dir, 'bin'))
+ with tarfile.open(code_coverage_dir + '.tgz', 'w:gz') as tar:
+ tar.add(os.path.join(code_coverage_dir, 'bin'), arcname='bin',
+ filter=PrintTarProgress)
+ MaybeUpload(args, code_coverage_dir, platform)
+
# Zip up llvm-objdump for sanitizer coverage.
objdumpdir = 'llvmobjdump-' + stamp
shutil.rmtree(objdumpdir, ignore_errors=True)
@@ -342,6 +356,18 @@ def main():
filter=PrintTarProgress)
MaybeUpload(args, objdumpdir, platform)
+ # On Mac, lld isn't part of the main zip. Upload it in a separate zip.
+ if sys.platform == 'darwin':
+ llddir = 'lld-' + stamp
+ shutil.rmtree(llddir, ignore_errors=True)
+ os.makedirs(os.path.join(llddir, 'bin'))
+ shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'lld'),
+ os.path.join(llddir, 'bin'))
+ with tarfile.open(llddir + '.tgz', 'w:gz') as tar:
+ tar.add(os.path.join(llddir, 'bin'), arcname='bin',
+ filter=PrintTarProgress)
+ MaybeUpload(args, llddir, platform)
+
# Zip up the translation_unit tool.
translation_unit_dir = 'translation_unit-' + stamp
shutil.rmtree(translation_unit_dir, ignore_errors=True)
diff --git a/chromium/tools/clang/scripts/run_tool.py b/chromium/tools/clang/scripts/run_tool.py
index 98f1cac5d45..e6a889379e5 100755
--- a/chromium/tools/clang/scripts/run_tool.py
+++ b/chromium/tools/clang/scripts/run_tool.py
@@ -262,14 +262,17 @@ def main():
parser.add_argument(
'--tool-args', nargs='*',
help='optional arguments passed to the tool')
+ parser.add_argument(
+ '--tool-path', nargs='?',
+ help='optional path to the tool directory')
args = parser.parse_args(argv)
- os.environ['PATH'] = '%s%s%s' % (
- os.path.abspath(os.path.join(
+ if args.tool_path:
+ tool_path = os.path.abspath(args.tool_path)
+ else:
+ tool_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
- '../../../third_party/llvm-build/Release+Asserts/bin')),
- os.pathsep,
- os.environ['PATH'])
+ '../../../third_party/llvm-build/Release+Asserts/bin'))
if args.generate_compdb:
with open(os.path.join(args.p, 'compile_commands.json'), 'w') as f:
@@ -298,7 +301,8 @@ def main():
print 'Shard %d-of-%d will process %d entries out of %d' % (
shard_number, shard_count, len(source_filenames), total_length)
- dispatcher = _CompilerDispatcher(args.tool, args.tool_args,
+ dispatcher = _CompilerDispatcher(os.path.join(tool_path, args.tool),
+ args.tool_args,
args.p,
source_filenames)
dispatcher.Run()
diff --git a/chromium/tools/clang/scripts/test_tool.py b/chromium/tools/clang/scripts/test_tool.py
index 58958ff1f04..82528dcdd0a 100755
--- a/chromium/tools/clang/scripts/test_tool.py
+++ b/chromium/tools/clang/scripts/test_tool.py
@@ -33,7 +33,7 @@ def _GenerateCompileCommands(files, include_paths):
include_path_flags = ' '.join('-I %s' % include_path.replace('\\', '/')
for include_path in include_paths)
return json.dumps([{'directory': os.path.dirname(f),
- 'command': 'clang++ -std=c++11 -fsyntax-only %s -c %s' % (
+ 'command': 'clang++ -std=c++14 -fsyntax-only %s -c %s' % (
include_path_flags, os.path.basename(f)),
'file': os.path.basename(f)} for f in files], indent=2)
@@ -186,9 +186,9 @@ def main(argv):
print '[ RUN ] %s' % os.path.relpath(actual)
expected_output = actual_output = None
with open(expected, 'r') as f:
- expected_output = f.readlines()
+ expected_output = f.read().splitlines()
with open(actual, 'r') as f:
- actual_output = f.readlines()
+ actual_output = f.read().splitlines()
if actual_output != expected_output:
failed += 1
for line in difflib.unified_diff(expected_output, actual_output,
diff --git a/chromium/tools/clang/scripts/update.py b/chromium/tools/clang/scripts/update.py
index 36422fc6cd9..190f0e41f35 100755
--- a/chromium/tools/clang/scripts/update.py
+++ b/chromium/tools/clang/scripts/update.py
@@ -27,14 +27,15 @@ import zipfile
# Do NOT CHANGE this if you don't know what you're doing -- see
# https://chromium.googlesource.com/chromium/src/+/master/docs/updating_clang.md
# Reverting problematic clang rolls is safe, though.
-CLANG_REVISION = '310694'
+CLANG_REVISION = '313786'
-use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
+use_head_revision = bool(os.environ.get('LLVM_FORCE_HEAD_REVISION', '0')
+ in ('1', 'YES'))
if use_head_revision:
CLANG_REVISION = 'HEAD'
# This is incremented when pushing a new build of Clang at the same revision.
-CLANG_SUB_REVISION=2
+CLANG_SUB_REVISION=1
PACKAGE_VERSION = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION)
@@ -388,12 +389,11 @@ def VeryifyVersionOfBuiltClangMatchesVERSION():
def UpdateClang(args):
- print 'Updating Clang to %s...' % PACKAGE_VERSION
-
if ReadStampFile() == PACKAGE_VERSION and not args.force_local_build:
- print 'Clang is already up to date.'
return 0
+ print 'Updating Clang to %s...' % PACKAGE_VERSION
+
# Reset the stamp file in case the build is unsuccessful.
WriteStampFile('')
@@ -439,10 +439,8 @@ def UpdateClang(args):
Checkout('LLVM', LLVM_REPO_URL + '/llvm/trunk', LLVM_DIR)
- # Apply https://reviews.llvm.org/D36596 locally to see how it does.
- # If you roll clang, you need to change this in some not yet clear way,
- # see https://crbug.com/755777
- assert use_head_revision or CLANG_REVISION == '310694'
+ # Back out previous local patches. This needs to be kept around a bit
+ # until all bots have cycled. See https://crbug.com/755777.
files = [
'lib/Transforms/InstCombine/InstructionCombining.cpp',
'test/DebugInfo/X86/formal_parameter.ll',
@@ -451,18 +449,11 @@ def UpdateClang(args):
'test/Transforms/InstCombine/debuginfo.ll',
'test/Transforms/Util/simplify-dbg-declare-load.ll',
]
- if use_head_revision:
- for f in [os.path.join(LLVM_DIR, f) for f in files[1:]]:
- RunCommand(['svn', 'revert', f])
- else:
- for f in [os.path.join(LLVM_DIR, f) for f in files[1:]]:
- if os.path.exists(f):
- os.remove(f)
- shutil.copy(os.path.join(THIS_DIR, 'InstructionCombining.cpp'),
- os.path.join(LLVM_DIR, files[0]))
+ for f in [os.path.join(LLVM_DIR, f) for f in files]:
+ RunCommand(['svn', 'revert', f])
Checkout('Clang', LLVM_REPO_URL + '/cfe/trunk', CLANG_DIR)
- if sys.platform != 'darwin':
+ if True:
Checkout('LLD', LLVM_REPO_URL + '/lld/trunk', LLD_DIR)
elif os.path.exists(LLD_DIR):
# In case someone sends a tryjob that temporary adds lld to the checkout,
@@ -605,9 +596,10 @@ def UpdateClang(args):
# Build PDBs for archival on Windows. Don't use RelWithDebInfo since it
# has different optimization defaults than Release.
+ # Also disable stack cookies (/GS-) for performance.
if sys.platform == 'win32':
- cflags += ['/Zi']
- cxxflags += ['/Zi']
+ cflags += ['/Zi', '/GS-']
+ cxxflags += ['/Zi', '/GS-']
ldflags += ['/DEBUG', '/OPT:REF', '/OPT:ICF']
CreateChromeToolsShim()
@@ -652,11 +644,16 @@ def UpdateClang(args):
# If any Chromium tools were built, install those now.
RunCommand(['ninja', 'cr-install'], msvc_arch='x64')
- if sys.platform == 'darwin':
- # See http://crbug.com/256342
- RunCommand(['strip', '-x', os.path.join(LLVM_BUILD_DIR, 'bin', 'clang')])
- elif sys.platform.startswith('linux'):
- RunCommand(['strip', os.path.join(LLVM_BUILD_DIR, 'bin', 'clang')])
+ stripped_binaries = ['clang', 'llvm-symbolizer', 'sancov']
+ if sys.platform.startswith('linux'):
+ stripped_binaries.append('lld')
+ stripped_binaries.append('llvm-ar')
+ for f in stripped_binaries:
+ if sys.platform == 'darwin':
+ # See http://crbug.com/256342
+ RunCommand(['strip', '-x', os.path.join(LLVM_BUILD_DIR, 'bin', f)])
+ elif sys.platform.startswith('linux'):
+ RunCommand(['strip', os.path.join(LLVM_BUILD_DIR, 'bin', f)])
VeryifyVersionOfBuiltClangMatchesVERSION()
@@ -692,6 +689,8 @@ def UpdateClang(args):
[LLVM_DIR if sys.platform == 'win32' else COMPILER_RT_DIR],
msvc_arch='x86', env=deployment_env)
RunCommand(['ninja', 'compiler-rt'], msvc_arch='x86')
+ if sys.platform != 'win32':
+ RunCommand(['ninja', 'fuzzer'])
# Copy select output to the main tree.
# TODO(hans): Make this (and the .gypi and .isolate files) version number
@@ -800,6 +799,7 @@ def main():
parser = argparse.ArgumentParser(description='Build Clang.')
parser.add_argument('--bootstrap', action='store_true',
help='first build clang with CC, then with itself.')
+ # TODO(phajdan.jr): remove --if-needed after fixing callers. It's no-op.
parser.add_argument('--if-needed', action='store_true',
help="run only if the script thinks clang is needed")
parser.add_argument('--force-local-build', action='store_true',
@@ -833,12 +833,6 @@ def main():
print '--lto-lld is only effective on Linux. Ignoring the option.'
args.lto_lld = False
- if args.if_needed:
- # TODO(thakis): Can probably remove this and --if-needed altogether.
- if re.search(r'\b(make_clang_dir)=', os.environ.get('GYP_DEFINES', '')):
- print 'Skipping Clang update (make_clang_dir= was set in GYP_DEFINES).'
- return 0
-
# Get svn if we're going to use it to check the revision or do a local build.
if (use_head_revision or args.llvm_force_head_revision or
args.force_local_build):
diff --git a/chromium/tools/clang/traffic_annotation_extractor/traffic_annotation_extractor.cpp b/chromium/tools/clang/traffic_annotation_extractor/traffic_annotation_extractor.cpp
index d958b1dab9b..5015932fd5d 100644
--- a/chromium/tools/clang/traffic_annotation_extractor/traffic_annotation_extractor.cpp
+++ b/chromium/tools/clang/traffic_annotation_extractor/traffic_annotation_extractor.cpp
@@ -206,7 +206,8 @@ class NetworkAnnotationTagCallback : public MatchFinder::MatchCallback {
// API. These functions are all defined in
// 'net/traffic_annotation/network_traffic_annotation.h'.
bool IsAPIFunction(const std::string& function_name) {
- return function_name == "net::DefineNetworkTrafficAnnotation" ||
+ return function_name == "net::NetworkTrafficAnnotationTag::NotReached" ||
+ function_name == "net::DefineNetworkTrafficAnnotation" ||
function_name == "net::DefinePartialNetworkTrafficAnnotation" ||
function_name == "net::CompleteNetworkTrafficAnnotation" ||
function_name == "net::BranchedCompleteNetworkTrafficAnnotation" ||
diff --git a/chromium/tools/cygprofile/BUILD.gn b/chromium/tools/cygprofile/BUILD.gn
index f6408695d13..fc4015a5f3e 100644
--- a/chromium/tools/cygprofile/BUILD.gn
+++ b/chromium/tools/cygprofile/BUILD.gn
@@ -35,3 +35,21 @@ executable("cygprofile_unittests") {
"//testing/gtest",
]
}
+
+executable("cygprofile_perftests") {
+ testonly = true
+
+ sources = [
+ "cygprofile_perftest.cc",
+ ]
+
+ configs -= [ "//build/config/android:default_cygprofile_instrumentation" ]
+ configs += [ "//build/config/android:no_cygprofile_instrumentation" ]
+
+ deps = [
+ ":cygprofile",
+ "//base",
+ "//testing/gtest",
+ "//testing/perf",
+ ]
+}
diff --git a/chromium/tools/cygprofile/cyglog_to_orderfile.py b/chromium/tools/cygprofile/cyglog_to_orderfile.py
index bc382f60e66..cd460fac93a 100755
--- a/chromium/tools/cygprofile/cyglog_to_orderfile.py
+++ b/chromium/tools/cygprofile/cyglog_to_orderfile.py
@@ -102,7 +102,7 @@ def _FindSymbolInfosAtOffset(offset_to_symbol_infos, offset):
raise SymbolNotFoundException(offset)
-def _GetObjectFileNames(obj_dir):
+def GetObjectFileNames(obj_dir):
"""Returns the list of object files in a directory."""
obj_files = []
for (dirpath, _, filenames) in os.walk(obj_dir):
@@ -155,7 +155,7 @@ def GetSymbolToSectionsMapFromObjectFiles(obj_dir):
Returns:
A map {symbol_name: [section_name1, section_name2...]}
"""
- object_files = _GetObjectFileNames(obj_dir)
+ object_files = GetObjectFileNames(obj_dir)
symbol_to_sections_map = {}
symbol_warnings = cygprofile_utils.WarningCollector(300)
symbol_infos = _AllSymbolInfos(object_files)
diff --git a/chromium/tools/cygprofile/cygprofile.cc b/chromium/tools/cygprofile/cygprofile.cc
index fe691963f41..7c04fed84b3 100644
--- a/chromium/tools/cygprofile/cygprofile.cc
+++ b/chromium/tools/cygprofile/cygprofile.cc
@@ -201,25 +201,21 @@ class Thread {
};
// Single log entry recorded for each function call.
-LogEntry::LogEntry(const void* address)
- : time(GetCurrentTime()),
- pid(getpid()),
- tid(GetTID()),
- address(address) {
-}
+LogEntry::LogEntry(const void* address, pid_t pid, pid_t tid)
+ : time(GetCurrentTime()), pid(pid), tid(tid), address(address) {}
ThreadLog::ThreadLog()
- : tid_(GetTID()),
- in_use_(false),
- flush_callback_(
- base::Bind(&ThreadLog::FlushInternal, base::Unretained(this))) {
-}
+ : pid_(getpid()),
+ tid_(GetTID()),
+ in_use_(false),
+ flush_callback_(
+ base::Bind(&ThreadLog::FlushInternal, base::Unretained(this))) {}
ThreadLog::ThreadLog(const FlushCallback& flush_callback)
- : tid_(GetTID()),
- in_use_(false),
- flush_callback_(flush_callback) {
-}
+ : pid_(getpid()),
+ tid_(GetTID()),
+ in_use_(false),
+ flush_callback_(flush_callback) {}
ThreadLog::~ThreadLog() {
PCHECK(0 == pthread_setspecific(g_tls_slot, NULL));
@@ -230,14 +226,12 @@ void ThreadLog::AddEntry(void* address) {
return;
in_use_ = true;
- CHECK_EQ(tid_, GetTID());
- const std::pair<base::hash_set<void*>::iterator, bool> pair =
- called_functions_.insert(address);
- const bool did_insert = pair.second;
+ DCHECK_EQ(tid_, GetTID());
+ bool did_insert = called_functions_.insert(address).second;
if (did_insert) {
base::AutoLock auto_lock(lock_);
- entries_.push_back(LogEntry(address));
+ entries_.emplace_back(address, pid_, tid_);
// Crash in a quickly understandable way instead of crashing (or maybe not
// though) due to OOM.
CHECK_LE(entries_.size(), kMaxBufferSize);
diff --git a/chromium/tools/cygprofile/cygprofile.h b/chromium/tools/cygprofile/cygprofile.h
index 7b25acc644b..97684e7858c 100644
--- a/chromium/tools/cygprofile/cygprofile.h
+++ b/chromium/tools/cygprofile/cygprofile.h
@@ -64,7 +64,7 @@ class Thread;
// Single log entry recorded for each function call.
struct LogEntry {
- LogEntry(const void* address);
+ LogEntry(const void* address, pid_t pid, pid_t tid);
const timespec time;
const pid_t pid;
@@ -100,6 +100,9 @@ class ThreadLog {
// above.
void FlushInternal(std::vector<LogEntry>* entries) const;
+ // Process ID, as returned by getpid().
+ const pid_t pid_;
+
// Thread identifier as Linux kernel shows it. LWP (light-weight process) is
// a unique ID of the thread in the system, unlike pthread_self() which is the
// same for fork()-ed threads.
diff --git a/chromium/tools/cygprofile/cygprofile_perftest.cc b/chromium/tools/cygprofile/cygprofile_perftest.cc
new file mode 100644
index 00000000000..ae8fe3e7a3d
--- /dev/null
+++ b/chromium/tools/cygprofile/cygprofile_perftest.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/cygprofile/cygprofile.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "base/strings/stringprintf.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+namespace cygprofile {
+
+namespace {
+
+void AddEntryCost(int iterations, int addresses_count) {
+ // This is intentionally leaky. ThreadLog() destructor would call abort(),
+ // limiting us to a single test. Leaking ThreadLog is fine as long as we clean
+ // up the entries.
+ auto* thread_log = new ThreadLog();
+
+ auto tick = base::TimeTicks::Now();
+ for (int i = 0; i < iterations; i++) {
+ for (int address = 0; address < addresses_count; address++) {
+ thread_log->AddEntry(reinterpret_cast<void*>(address));
+ }
+ }
+ auto tock = base::TimeTicks::Now();
+ double nanos = static_cast<double>((tock - tick).InNanoseconds());
+ auto ns_per_call =
+ nanos / (iterations * static_cast<double>(addresses_count));
+ auto modifier = base::StringPrintf("_%d_%d", iterations, addresses_count);
+ perf_test::PrintResult("AddEntryCostPerCall", modifier, "", ns_per_call, "ns",
+ true);
+
+ // Entries cleanup, see comment at the beginning of the function.
+ std::vector<LogEntry> entries;
+ thread_log->TakeEntries(&entries);
+}
+} // namespace
+
+TEST(CygprofilePerfTest, CreateEntries_10_10000) {
+ AddEntryCost(10, 10000);
+}
+
+TEST(CygprofilePerfTest, CreateEntries_100_10000) {
+ AddEntryCost(100, 10000);
+}
+
+TEST(CygprofilePerfTest, CreateEntries_10_100000) {
+ AddEntryCost(10, 100000);
+}
+
+TEST(CygprofilePerfTest, CreateEntries_100_1000000) {
+ AddEntryCost(100, 100000);
+}
+
+} // namespace cygprofile
+
+// Custom runner implementation since base's one requires JNI on Android.
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/chromium/tools/cygprofile/memory_top_10_mobile_000.wprgo.sha1 b/chromium/tools/cygprofile/memory_top_10_mobile_000.wprgo.sha1
new file mode 100644
index 00000000000..f10033880ff
--- /dev/null
+++ b/chromium/tools/cygprofile/memory_top_10_mobile_000.wprgo.sha1
@@ -0,0 +1 @@
+254c66be2ae74962a2e4d6a813af6f43f86c708a \ No newline at end of file
diff --git a/chromium/tools/cygprofile/profile_android_startup.py b/chromium/tools/cygprofile/profile_android_startup.py
index c33b60b3b08..6c5c29a7e9f 100755
--- a/chromium/tools/cygprofile/profile_android_startup.py
+++ b/chromium/tools/cygprofile/profile_android_startup.py
@@ -35,12 +35,8 @@ from pylib import constants
sys.path.append(os.path.join(sys.path[0], '..', '..', 'tools', 'perf'))
from core import path_util
sys.path.append(path_util.GetTelemetryDir())
-from telemetry.internal.util import wpr_server
-
-sys.path.append(os.path.join(sys.path[0], '..', '..',
- 'third_party', 'webpagereplay'))
-import adb_install_cert
-import certutils
+from telemetry.internal.util import webpagereplay_go_server
+from telemetry.internal.util import binary_manager
class NoCyglogDataError(Exception):
@@ -72,23 +68,20 @@ class WprManager(object):
_WPR_BUCKET = 'chrome-partner-telemetry'
- def __init__(self, wpr_archive, device, cmdline_file):
+ def __init__(self, wpr_archive, device, cmdline_file, package):
self._device = device
self._wpr_archive = wpr_archive
self._wpr_archive_hash = wpr_archive + '.sha1'
self._cmdline_file = cmdline_file
self._wpr_server = None
- self._wpr_ca_cert_path = None
- self._device_cert_util = None
self._host_http_port = None
self._host_https_port = None
- self._is_test_ca_installed = False
self._flag_changer = None
+ self._package = package
def Start(self):
"""Set up the device and host for WPR."""
self.Stop()
- # TODO(lizeb,pasko): make self._InstallTestCa() work
self._BringUpWpr()
self._StartForwarder()
@@ -96,7 +89,6 @@ class WprManager(object):
"""Clean up the device and host's WPR setup."""
self._StopForwarder()
self._StopWpr()
- # TODO(lizeb,pasko): make self._RemoveTestCa() work
def __enter__(self):
self.Start()
@@ -104,51 +96,14 @@ class WprManager(object):
def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
self.Stop()
- def _InstallTestCa(self):
- """Generates and deploys a test certificate authority."""
- print 'Installing test certificate authority on device: %s' % (
- self._device.adb.GetDeviceSerial())
- self._wpr_ca_cert_path = os.path.join(tempfile.mkdtemp(), 'testca.pem')
- certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
- cert_path=self._wpr_ca_cert_path)
- self._device_cert_util = adb_install_cert.AndroidCertInstaller(
- self._device.adb.GetDeviceSerial(), None, self._wpr_ca_cert_path)
- self._device_cert_util.install_cert(overwrite_cert=True)
- self._is_test_ca_installed = True
-
- def _RemoveTestCa(self):
- """Remove root CA generated by previous call to InstallTestCa().
-
- Removes the test root certificate from both the device and host machine.
- """
- print 'Cleaning up test CA...'
- if not self._wpr_ca_cert_path:
- return
-
- if self._is_test_ca_installed:
- try:
- self._device_cert_util.remove_cert()
- except Exception:
- # Best effort cleanup - show the error and continue.
- logging.error(
- 'Error while trying to remove certificate authority: %s. '
- % self._adb.device_serial())
- self._is_test_ca_installed = False
-
- shutil.rmtree(os.path.dirname(self._wpr_ca_cert_path), ignore_errors=True)
- self._wpr_ca_cert_path = None
- self._device_cert_util = None
-
def _BringUpWpr(self):
"""Start the WPR server on the host and the forwarder on the device."""
print 'Starting WPR on host...'
_DownloadFromCloudStorage(self._WPR_BUCKET, self._wpr_archive_hash)
- args = ['--use_closest_match']
- if self._is_test_ca_installed:
- args.extend(['--should_generate_certs',
- '--https_root_ca_cert_path=' + self._wpr_ca_cert_path])
- self._wpr_server = wpr_server.ReplayServer(self._wpr_archive,
- '127.0.0.1', 0, 0, None, args)
+ if binary_manager.NeedsInit():
+ binary_manager.InitDependencyManager([])
+ self._wpr_server = webpagereplay_go_server.ReplayServer(self._wpr_archive,
+ '127.0.0.1', 0, 0, replay_options=[])
ports = self._wpr_server.StartServer()[:-1]
self._host_http_port = ports[0]
self._host_https_port = ports[1]
@@ -180,14 +135,27 @@ class WprManager(object):
self._flag_changer.AddFlags([
'--host-resolver-rules=MAP * 127.0.0.1,EXCLUDE localhost',
'--testing-fixed-http-port=%s' % device_http,
- '--testing-fixed-https-port=%s' % device_https])
+ '--testing-fixed-https-port=%s' % device_https,
+
+ # Allows to selectively avoid certificate errors in Chrome. Unlike
+ # --ignore-certificate-errors this allows exercising the HTTP disk cache
+ # and avoids re-establishing socket connections. The value is taken from
+ # the WprGo documentation at:
+ # https://github.com/catapult-project/catapult/blob/master/web_page_replay_go/README.md
+ '--ignore-certificate-errors-spki-list=' +
+ 'PhrPvGIaAMmd29hj8BCZOq096yj7uMpRNHpn5PDxI6I=',
+
+ # The flag --ignore-certificate-errors-spki-list (above) requires
+ # specifying the profile directory, otherwise it is silently ignored.
+ '--user-data-dir=/data/data/{}'.format(self._package)])
def _StopForwarder(self):
"""Shuts down the port forwarding service."""
- print 'Stopping device forwarder...'
if self._flag_changer:
+ print 'Restoring flags while stopping forwarder, but why?...'
self._flag_changer.Restore()
self._flag_changer = None
+ print 'Stopping device forwarder...'
forwarder.Forwarder.UnmapAllDevicePorts(self._device)
@@ -203,8 +171,7 @@ class AndroidProfileTool(object):
# TEST_URL must be a url in the WPR_ARCHIVE.
_TEST_URL = 'https://www.google.com/#hl=en&q=science'
_WPR_ARCHIVE = os.path.join(
- constants.DIR_SOURCE_ROOT, 'tools', 'perf', 'page_sets', 'data',
- 'top_10_mobile_002.wpr')
+ os.path.dirname(__file__), 'memory_top_10_mobile_000.wprgo')
# TODO(jbudorick): Make host_cyglog_dir mandatory after updating
# downstream clients. See crbug.com/639831 for context.
@@ -262,7 +229,7 @@ class AndroidProfileTool(object):
self._KillChrome(package_info)
self._SetUpDeviceFolders()
with WprManager(self._WPR_ARCHIVE, self._device,
- package_info.cmdline_file):
+ package_info.cmdline_file, package_info.package):
self._StartChrome(package_info, self._TEST_URL)
time.sleep(90)
self._KillChrome(package_info)
diff --git a/chromium/tools/determinism/deterministic_build_whitelist.pyl b/chromium/tools/determinism/deterministic_build_whitelist.pyl
index 4d7114ab4df..8a4c4e33667 100644
--- a/chromium/tools/determinism/deterministic_build_whitelist.pyl
+++ b/chromium/tools/determinism/deterministic_build_whitelist.pyl
@@ -21,6 +21,7 @@
# https://crbug.com/330263
'linux': [
+ 'ppapi_nacl_tests_pnacl_newlib_x64.nexe',
],
# https://crbug.com/330262
diff --git a/chromium/tools/fuchsia/OWNERS b/chromium/tools/fuchsia/OWNERS
new file mode 100644
index 00000000000..e7034eabb1e
--- /dev/null
+++ b/chromium/tools/fuchsia/OWNERS
@@ -0,0 +1 @@
+file://build/fuchsia/OWNERS
diff --git a/chromium/tools/fuchsia/local-sdk.py b/chromium/tools/fuchsia/local-sdk.py
index 5e747dcba09..2cdda646479 100755
--- a/chromium/tools/fuchsia/local-sdk.py
+++ b/chromium/tools/fuchsia/local-sdk.py
@@ -30,6 +30,13 @@ def EnsureEmptyDir(path):
os.makedirs(path)
+def BuildForArch(arch):
+ Run('scripts/build-zircon.sh', '-t', arch)
+ Run('packages/gn/gen.py', '--target_cpu=' + arch, '--modules=packages/gn/sdk',
+ '--ignore-skia', '--release')
+ Run('buildtools/ninja', '-C', 'out/release-' + arch)
+
+
def main(args):
if len(args) != 1 or not os.path.isdir(args[0]):
print 'usage: %s <path_to_fuchsia_tree>' % SELF_FILE
@@ -41,11 +48,10 @@ def main(args):
# Switch to the Fuchsia tree and build an SDK.
os.chdir(fuchsia_root)
- Run('scripts/build-magenta.sh', '-t', 'x86_64')
- Run('scripts/build-magenta.sh', '-t', 'aarch64')
- Run('packages/gn/gen.py', '--target_cpu=x86-64', '--modules=sdk',
- '--ignore-skia', '--release')
- Run('packages/gn/build.py', '--release')
+
+ BuildForArch('x86-64')
+ BuildForArch('aarch64')
+
tempdir = tempfile.mkdtemp()
sdk_tar = os.path.join(tempdir, 'fuchsia-sdk.tgz')
Run('go', 'run', 'scripts/makesdk.go', '-output', sdk_tar, '.')
diff --git a/chromium/tools/fuchsia/run-swarmed.py b/chromium/tools/fuchsia/run-swarmed.py
new file mode 100755
index 00000000000..ec6c42bfcd7
--- /dev/null
+++ b/chromium/tools/fuchsia/run-swarmed.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs a Fuchsia gtest-based test on Swarming, optionally many times,
+collecting the output of the runs into a directory. Useful for flake checking,
+and faster than using trybots by avoiding repeated bot_update, compile, archive,
+etc. and allowing greater parallelism.
+
+To use, run in a new shell (it blocks until all Swarming jobs complete):
+
+ tools/fuchsia/run-swarmed.py -t content_unittests --out-dir=out/fuch
+
+The logs of the runs will be stored in results/ (or specify a results directory
+with --results=some_dir). You can then do something like `grep -L SUCCESS
+results/*` to find the tests that failed or otherwise process the log files.
+"""
+
+import argparse
+import multiprocessing
+import os
+import shutil
+import subprocess
+import sys
+
+
+INTERNAL_ERROR_EXIT_CODE = -1000
+
+
+def _Spawn(args):
+ """Triggers a swarming job. The arguments passed are:
+ - The index of the job;
+ - The command line arguments object;
+ - The hash of the isolate job used to trigger.
+
+ The return value is passed to a collect-style map() and consists of:
+ - The index of the job;
+ - The json file created by triggering and used to collect results;
+ - The command line arguments object.
+ """
+ index, args, isolated_hash = args
+ json_file = os.path.join(args.results, '%d.json' % index)
+ trigger_args = [
+ 'tools/swarming_client/swarming.py', 'trigger',
+ '-S', 'https://chromium-swarm.appspot.com',
+ '-I', 'https://isolateserver.appspot.com',
+ '-d', 'os', 'Linux',
+ '-d', 'pool', 'Chrome',
+ '-d', 'kvm', '1',
+ '-s', isolated_hash,
+ '--dump-json', json_file,
+ '--',
+ '--test-launcher-summary-output=${ISOLATED_OUTDIR}/output.json']
+ filter_file = \
+ 'testing/buildbot/filters/fuchsia.' + args.test_name + '.filter'
+ if os.path.isfile(filter_file):
+ trigger_args.append('--test-launcher-filter-file=../../' + filter_file)
+ with open(os.devnull, 'w') as nul:
+ subprocess.check_call(trigger_args, stdout=nul)
+ return (index, json_file, args)
+
+
+def _Collect(spawn_result):
+ index, json_file, args = spawn_result
+ p = subprocess.Popen([
+ 'tools/swarming_client/swarming.py', 'collect',
+ '-S', 'https://chromium-swarm.appspot.com',
+ '--json', json_file,
+ '--task-output-stdout=console'],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ stdout = p.communicate()[0]
+ if p.returncode != 0 and len(stdout) < 2**10 and 'Internal error!' in stdout:
+ exit_code = INTERNAL_ERROR_EXIT_CODE
+ file_suffix = '.INTERNAL_ERROR'
+ else:
+ exit_code = p.returncode
+ file_suffix = '' if exit_code == 0 else '.FAILED'
+ filename = '%d%s.stdout.txt' % (index, file_suffix)
+ with open(os.path.join(args.results, filename), 'w') as f:
+ f.write(stdout)
+ return exit_code
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--out-dir', default='out/fuch', help='Build directory.')
+ parser.add_argument('--test-name', '-t', required=True,
+ help='Name of test to run.')
+ parser.add_argument('--copies', '-n', type=int, default=1,
+ help='Number of copies to spawn.')
+ parser.add_argument('--results', '-r', default='results',
+ help='Directory in which to store results.')
+ args = parser.parse_args()
+
+ subprocess.check_call(
+ ['tools/mb/mb.py', 'isolate', '//' + args.out_dir, args.test_name])
+
+ print 'If you get authentication errors, follow:'
+ print ' https://www.chromium.org/developers/testing/isolated-testing/for-swes#TOC-Login-on-the-services'
+
+ print 'Uploading to isolate server, this can take a while...'
+ archive_output = subprocess.check_output(
+ ['tools/swarming_client/isolate.py', 'archive',
+ '-I', 'https://isolateserver.appspot.com',
+ '-i', os.path.join(args.out_dir, args.test_name + '.isolate'),
+ '-s', os.path.join(args.out_dir, args.test_name + '.isolated')])
+ isolated_hash = archive_output.split()[0]
+
+ if os.path.isdir(args.results):
+ shutil.rmtree(args.results)
+ os.makedirs(args.results)
+
+ try:
+ print 'Triggering %d tasks...' % args.copies
+ pool = multiprocessing.Pool()
+ spawn_args = zip(range(args.copies),
+ [args] * args.copies,
+ [isolated_hash] * args.copies)
+ spawn_results = pool.imap_unordered(_Spawn, spawn_args)
+
+ exit_codes = []
+ collect_results = pool.imap_unordered(_Collect, spawn_results)
+ for result in collect_results:
+ exit_codes.append(result)
+ successes = sum(1 for x in exit_codes if x == 0)
+ errors = sum(1 for x in exit_codes if x == INTERNAL_ERROR_EXIT_CODE)
+ failures = len(exit_codes) - successes - errors
+ clear_to_eol = '\033[K'
+ print('\r[%d/%d] collected: '
+ '%d successes, %d failures, %d bot errors...%s' % (len(exit_codes),
+ args.copies, successes, failures, errors, clear_to_eol)),
+ sys.stdout.flush()
+
+ print
+ print 'Results logs collected into', os.path.abspath(args.results) + '.'
+ finally:
+ pool.close()
+ pool.join()
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/chromium/tools/gdb/gdb_chrome.py b/chromium/tools/gdb/gdb_chrome.py
index 67e333f2ea4..ef42a2c5679 100644
--- a/chromium/tools/gdb/gdb_chrome.py
+++ b/chromium/tools/gdb/gdb_chrome.py
@@ -155,7 +155,7 @@ class LocationPrinter(Printer):
return '%s()@%s:%s' % (self.val['function_name_'].string(),
self.val['file_name_'].string(),
self.val['line_number_'])
-pp_set.add_printer('tracked_objects::Location', '^tracked_objects::Location$',
+pp_set.add_printer('base::Location', '^base::Location$',
LocationPrinter)
diff --git a/chromium/tools/gn/analyzer_unittest.cc b/chromium/tools/gn/analyzer_unittest.cc
index abbb96e21f1..925b6eabbf7 100644
--- a/chromium/tools/gn/analyzer_unittest.cc
+++ b/chromium/tools/gn/analyzer_unittest.cc
@@ -113,90 +113,88 @@ class AnalyzerTest : public testing::Test {
} // namespace
-// TODO: clean this up when raw string literals are allowed.
-
TEST_F(AnalyzerTest, AllWasPruned) {
RunBasicTest(
- "{"
- " \"files\": [ \"//d/b.cc\" ],"
- " \"additional_compile_targets\": [ \"all\" ],"
- " \"test_targets\": [ ]"
- "}",
- "{"
- "\"compile_targets\":[\"//d:b_unittests\",\"//d:c\"],"
- "\"status\":\"Found dependency\","
- "\"test_targets\":[]"
+ R"({
+ "files": [ "//d/b.cc" ],
+ "additional_compile_targets": [ "all" ],
+ "test_targets": [ ]
+ })",
+ "{"
+ R"("compile_targets":["//d:b_unittests","//d:c"],)"
+ R"("status":"Found dependency",)"
+ R"("test_targets":[])"
"}");
}
TEST_F(AnalyzerTest, NoDependency) {
RunBasicTest(
- "{"
- " \"files\":[ \"//missing.cc\" ],"
- " \"additional_compile_targets\": [ \"all\" ],"
- " \"test_targets\": [ \"//:a\" ]"
- "}",
- "{"
- "\"compile_targets\":[],"
- "\"status\":\"No dependency\","
- "\"test_targets\":[]"
+ R"({
+ "files":[ "//missing.cc" ],
+ "additional_compile_targets": [ "all" ],
+ "test_targets": [ "//:a" ]
+ })",
+ "{"
+ R"("compile_targets":[],)"
+ R"("status":"No dependency",)"
+ R"("test_targets":[])"
"}");
}
TEST_F(AnalyzerTest, NoFilesNoTargets) {
RunBasicTest(
- "{"
- " \"files\": [],"
- " \"additional_compile_targets\": [],"
- " \"test_targets\": []"
- "}",
- "{"
- "\"compile_targets\":[],"
- "\"status\":\"No dependency\","
- "\"test_targets\":[]"
+ R"({
+ "files": [],
+ "additional_compile_targets": [],
+ "test_targets": []
+ })",
+ "{"
+ R"("compile_targets":[],)"
+ R"("status":"No dependency",)"
+ R"("test_targets":[])"
"}");
}
TEST_F(AnalyzerTest, OneTestTargetModified) {
RunBasicTest(
- "{"
- " \"files\": [ \"//a.cc\" ],"
- " \"additional_compile_targets\": [],"
- " \"test_targets\": [ \"//:a\" ]"
- "}",
- "{"
- "\"compile_targets\":[],"
- "\"status\":\"Found dependency\","
- "\"test_targets\":[\"//:a\"]"
+ R"({
+ "files": [ "//a.cc" ],
+ "additional_compile_targets": [],
+ "test_targets": [ "//:a" ]
+ })",
+ "{"
+ R"("compile_targets":[],)"
+ R"("status":"Found dependency",)"
+ R"("test_targets":["//:a"])"
"}");
}
TEST_F(AnalyzerTest, FilesArentSourceAbsolute) {
RunBasicTest(
- "{"
- " \"files\": [ \"a.cc\" ],"
- " \"additional_compile_targets\": [],"
- " \"test_targets\": [ \"//:a\" ]"
- "}",
- "{"
- "\"error\":"
- "\"\\\"a.cc\\\" is not a source-absolute or absolute path.\","
- "\"invalid_targets\":[]"
+ R"({
+ "files": [ "a.cc" ],
+ "additional_compile_targets": [],
+ "test_targets": [ "//:a" ]
+ })",
+ "{"
+ R"("error":)"
+ R"("\"a.cc\" is not a source-absolute or absolute path.",)"
+ R"("invalid_targets":[])"
"}");
}
TEST_F(AnalyzerTest, WrongInputFields) {
RunBasicTest(
- "{"
- " \"files\": [ \"//a.cc\" ],"
- " \"compile_targets\": [],"
- " \"test_targets\": [ \"//:a\" ]"
- "}",
- "{"
- "\"error\":"
- "\"Input does not have a key named "
- "\\\"additional_compile_targets\\\" with a list value.\","
- "\"invalid_targets\":[]"
+ R"({
+ "files": [ "//a.cc" ],
+ "compile_targets": [],
+ "test_targets": [ "//:a" ]
+ })",
+ "{"
+ R"("error":)"
+ R"("Input does not have a key named )"
+ R"(\"additional_compile_targets\" with a list value.",)"
+ R"("invalid_targets":[])"
"}");
}
@@ -205,15 +203,15 @@ TEST_F(AnalyzerTest, BuildFilesWereModified) {
// "Found dependency (all)" error since we can't handle changes to
// build files yet (crbug.com/555273).
RunBasicTest(
- "{"
- " \"files\": [ \"//a.cc\", \"//BUILD.gn\" ],"
- " \"additional_compile_targets\": [],"
- " \"test_targets\": [ \"//:a\" ]"
- "}",
- "{"
- "\"compile_targets\":[\"//:a\"],"
- "\"status\":\"Found dependency (all)\","
- "\"test_targets\":[\"//:a\"]"
+ R"({
+ "files": [ "//a.cc", "//BUILD.gn" ],
+ "additional_compile_targets": [],
+ "test_targets": [ "//:a" ]
+ })",
+ "{"
+ R"("compile_targets":["//:a"],)"
+ R"/("status":"Found dependency (all)",)/"
+ R"("test_targets":["//:a"])"
"}");
}
@@ -222,14 +220,14 @@ TEST_F(AnalyzerTest, BuildFilesWereModifiedAndCompilingAll) {
// "Found dependency (all)" error since we can't handle changes to
// build files yet (crbug.com/555273).
RunBasicTest(
- "{"
- " \"files\": [ \"//a.cc\", \"//BUILD.gn\" ],"
- " \"additional_compile_targets\": [ \"all\" ],"
- " \"test_targets\": [ \"//:a\" ]"
- "}",
- "{"
- "\"compile_targets\":[\"all\"],"
- "\"status\":\"Found dependency (all)\","
- "\"test_targets\":[\"//:a\"]"
+ R"({
+ "files": [ "//a.cc", "//BUILD.gn" ],
+ "additional_compile_targets": [ "all" ],
+ "test_targets": [ "//:a" ]
+ })",
+ "{"
+ R"("compile_targets":["all"],)"
+ R"/("status":"Found dependency (all)",)/"
+ R"("test_targets":["//:a"])"
"}");
}
diff --git a/chromium/tools/gn/args.cc b/chromium/tools/gn/args.cc
index 1a01a50c6af..967a65ec581 100644
--- a/chromium/tools/gn/args.cc
+++ b/chromium/tools/gn/args.cc
@@ -304,6 +304,8 @@ void Args::SetSystemVarsLocked(Scope* dest) const {
os = "netbsd";
#elif defined(OS_AIX)
os = "aix";
+#elif defined(OS_FUCHSIA)
+ os = "fuchsia";
#else
#error Unknown OS type.
#endif
diff --git a/chromium/tools/gn/bootstrap/bootstrap.py b/chromium/tools/gn/bootstrap/bootstrap.py
index 004d81cef5a..f154ed2ef3e 100755
--- a/chromium/tools/gn/bootstrap/bootstrap.py
+++ b/chromium/tools/gn/bootstrap/bootstrap.py
@@ -179,6 +179,7 @@ def build_gn_with_ninja_manually(tempdir, options):
write_buildflag_header_manually(root_gen_dir, 'base/debug/debugging_flags.h',
{
+ 'ENABLE_LOCATION_SOURCE': 'false',
'ENABLE_PROFILING': 'false',
'CAN_UNWIND_WITH_FRAME_POINTERS': 'false'
})
@@ -204,7 +205,7 @@ def build_gn_with_ninja_manually(tempdir, options):
write_gn_ninja(os.path.join(tempdir, 'build.ninja'),
root_gen_dir, options)
- cmd = ['ninja', '-C', tempdir]
+ cmd = ['ninja', '-C', tempdir, '-w', 'dupbuild=err']
if options.verbose:
cmd.append('-v')
@@ -458,6 +459,7 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/metrics/bucket_ranges.cc',
'base/metrics/field_trial.cc',
'base/metrics/field_trial_param_associator.cc',
+ 'base/metrics/field_trial_params.cc',
'base/metrics/histogram.cc',
'base/metrics/histogram_base.cc',
'base/metrics/histogram_functions.cc',
@@ -479,8 +481,6 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/process/process_handle.cc',
'base/process/process_iterator.cc',
'base/process/process_metrics.cc',
- 'base/profiler/scoped_profile.cc',
- 'base/profiler/scoped_tracker.cc',
'base/rand_util.cc',
'base/run_loop.cc',
'base/sequence_token.cc',
@@ -507,6 +507,7 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/task_scheduler/scheduler_lock_impl.cc',
'base/task_scheduler/scheduler_single_thread_task_runner_manager.cc',
'base/task_scheduler/scheduler_worker.cc',
+ 'base/task_scheduler/scheduler_worker_pool.cc',
'base/task_scheduler/scheduler_worker_pool_impl.cc',
'base/task_scheduler/scheduler_worker_pool_params.cc',
'base/task_scheduler/scheduler_worker_stack.cc',
@@ -523,6 +524,7 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/third_party/icu/icu_utf.cc',
'base/third_party/nspr/prtime.cc',
'base/threading/post_task_and_reply_impl.cc',
+ 'base/threading/scoped_blocking_call.cc',
'base/threading/sequence_local_storage_map.cc',
'base/threading/sequenced_task_runner_handle.cc',
'base/threading/sequenced_worker_pool.cc',
@@ -534,7 +536,6 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/threading/thread_local_storage.cc',
'base/threading/thread_restrictions.cc',
'base/threading/thread_task_runner_handle.cc',
- 'base/threading/worker_pool.cc',
'base/time/clock.cc',
'base/time/default_clock.cc',
'base/time/default_tick_clock.cc',
@@ -574,12 +575,9 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/trace_event/trace_log.cc',
'base/trace_event/trace_log_constants.cc',
'base/trace_event/tracing_agent.cc',
- 'base/tracked_objects.cc',
- 'base/tracking_info.cc',
'base/unguessable_token.cc',
'base/value_iterators.cc',
'base/values.cc',
- 'base/value_iterators.cc',
'base/vlog.cc',
])
@@ -606,17 +604,12 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/strings/string16.cc',
'base/synchronization/condition_variable_posix.cc',
'base/synchronization/lock_impl_posix.cc',
- 'base/synchronization/read_write_lock_posix.cc',
- 'base/synchronization/waitable_event_posix.cc',
'base/sys_info_posix.cc',
'base/task_scheduler/task_tracker_posix.cc',
'base/threading/platform_thread_internal_posix.cc',
'base/threading/platform_thread_posix.cc',
'base/threading/thread_local_storage_posix.cc',
- 'base/threading/worker_pool_posix.cc',
'base/time/time_conversion_posix.cc',
- 'base/time/time_exploded_posix.cc',
- 'base/time/time_now_posix.cc',
'base/trace_event/heap_profiler_allocation_register_posix.cc',
])
static_libraries['libevent'] = {
@@ -652,7 +645,6 @@ def write_gn_ninja(path, root_gen_dir, options):
static_libraries['base']['sources'].extend([
'base/memory/shared_memory_handle_posix.cc',
'base/memory/shared_memory_posix.cc',
- 'base/memory/shared_memory_tracker.cc',
'base/nix/xdg_util.cc',
'base/process/internal_linux.cc',
'base/process/memory_linux.cc',
@@ -662,7 +654,10 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/process/process_linux.cc',
'base/process/process_metrics_linux.cc',
'base/strings/sys_string_conversions_posix.cc',
+ 'base/synchronization/waitable_event_posix.cc',
'base/sys_info_linux.cc',
+ 'base/time/time_exploded_posix.cc',
+ 'base/time/time_now_posix.cc',
'base/threading/platform_thread_linux.cc',
])
if is_linux:
@@ -709,6 +704,7 @@ def write_gn_ninja(path, root_gen_dir, options):
'base/process/process_iterator_mac.cc',
'base/process/process_metrics_mac.cc',
'base/strings/sys_string_conversions_mac.mm',
+ 'base/synchronization/waitable_event_mac.cc',
'base/sys_info_mac.mm',
'base/time/time_mac.cc',
'base/threading/platform_thread_mac.mm',
@@ -827,7 +823,7 @@ def build_gn_with_gn(temp_gn, build_dir, options):
cmd = [temp_gn, 'gen', build_dir, '--args=%s' % gn_gen_args]
check_call(cmd)
- cmd = ['ninja', '-C', build_dir]
+ cmd = ['ninja', '-C', build_dir, '-w', 'dupbuild=err']
if options.verbose:
cmd.append('-v')
cmd.append('gn')
diff --git a/chromium/tools/gn/build_settings.h b/chromium/tools/gn/build_settings.h
index 3f41ae3a8de..670f996a766 100644
--- a/chromium/tools/gn/build_settings.h
+++ b/chromium/tools/gn/build_settings.h
@@ -125,7 +125,7 @@ class BuildSettings {
std::unique_ptr<std::set<SourceFile>> exec_script_whitelist_;
- BuildSettings& operator=(const BuildSettings& other); // Disallow.
+ DISALLOW_ASSIGN(BuildSettings);
};
#endif // TOOLS_GN_BUILD_SETTINGS_H_
diff --git a/chromium/tools/gn/bundle_data.cc b/chromium/tools/gn/bundle_data.cc
index c7f3fb8f054..a1f985d055c 100644
--- a/chromium/tools/gn/bundle_data.cc
+++ b/chromium/tools/gn/bundle_data.cc
@@ -133,6 +133,9 @@ void BundleData::GetOutputsAsSourceFiles(
if (!assets_catalog_sources_.empty())
outputs_as_source->push_back(GetCompiledAssetCatalogPath());
+ if (!partial_info_plist_.is_null())
+ outputs_as_source->push_back(partial_info_plist_);
+
if (!code_signing_script_.is_null()) {
std::vector<SourceFile> code_signing_output_files;
SubstitutionWriter::GetListAsSourceFiles(code_signing_outputs_,
@@ -153,18 +156,12 @@ SourceFile BundleData::GetCompiledAssetCatalogPath() const {
}
SourceFile BundleData::GetBundleRootDirOutput(const Settings* settings) const {
- const SourceDir& build_dir = settings->toolchain_output_dir();
- std::string bundle_root_relative = RebasePath(root_dir().value(), build_dir);
-
- size_t first_component = bundle_root_relative.find('/');
- if (first_component != std::string::npos) {
- base::StringPiece outermost_bundle_dir =
- base::StringPiece(bundle_root_relative).substr(0, first_component);
- std::string return_value(build_dir.value());
- outermost_bundle_dir.AppendToString(&return_value);
- return SourceFile(SourceFile::SWAP_IN, &return_value);
- }
- return SourceFile(root_dir().value());
+ std::string root_dir_value = root_dir().value();
+ size_t last_separator = root_dir_value.rfind('/');
+ if (last_separator != std::string::npos)
+ root_dir_value = root_dir_value.substr(0, last_separator);
+
+ return SourceFile(SourceFile::SWAP_IN, &root_dir_value);
}
SourceDir BundleData::GetBundleRootDirOutputAsDir(
diff --git a/chromium/tools/gn/bundle_data.h b/chromium/tools/gn/bundle_data.h
index 26c972c0a03..5264b9efae8 100644
--- a/chromium/tools/gn/bundle_data.h
+++ b/chromium/tools/gn/bundle_data.h
@@ -88,6 +88,9 @@ class BundleData {
SourceDir& root_dir() { return root_dir_; }
const SourceDir& root_dir() const { return root_dir_; }
+ SourceDir& contents_dir() { return contents_dir_; }
+ const SourceDir& contents_dir() const { return contents_dir_; }
+
SourceDir& resources_dir() { return resources_dir_; }
const SourceDir& resources_dir() const { return resources_dir_; }
@@ -114,6 +117,11 @@ class BundleData {
return xcode_test_application_name_;
}
+ void set_partial_info_plist(const SourceFile& partial_info_plist) {
+ partial_info_plist_ = partial_info_plist;
+ }
+ const SourceFile& partial_info_plist() const { return partial_info_plist_; }
+
void set_code_signing_script(const SourceFile& script_file) {
code_signing_script_ = script_file;
}
@@ -154,8 +162,9 @@ class BundleData {
std::vector<LabelPattern> bundle_deps_filter_;
// All those values are subdirectories relative to root_build_dir, and apart
- // from root_dir, they are either equal to root_dir_ or subdirectories of it.
+ // from root_dir_, they are either equal to root_dir_ or subdirectories of it.
SourceDir root_dir_;
+ SourceDir contents_dir_;
SourceDir resources_dir_;
SourceDir executable_dir_;
SourceDir plugins_dir_;
@@ -173,6 +182,10 @@ class BundleData {
// generate the Xcode project when using --ide=xcode.
std::string xcode_test_application_name_;
+ // Path to the partial Info.plist generated by the asset catalog compiler
+ // (corresponds to {{bundle_partial_info_plist}} expansion).
+ SourceFile partial_info_plist_;
+
// Holds the values (script name, sources, outputs, script arguments) for the
// code signing step if defined.
SourceFile code_signing_script_;
diff --git a/chromium/tools/gn/bundle_file_rule.cc b/chromium/tools/gn/bundle_file_rule.cc
index 5c4fc270b46..6631a59b38e 100644
--- a/chromium/tools/gn/bundle_file_rule.cc
+++ b/chromium/tools/gn/bundle_file_rule.cc
@@ -35,6 +35,9 @@ SourceFile BundleFileRule::ApplyPatternToSource(
case SUBSTITUTION_BUNDLE_ROOT_DIR:
output_path.append(bundle_data.root_dir().value());
break;
+ case SUBSTITUTION_BUNDLE_CONTENTS_DIR:
+ output_path.append(bundle_data.contents_dir().value());
+ break;
case SUBSTITUTION_BUNDLE_RESOURCES_DIR:
output_path.append(bundle_data.resources_dir().value());
break;
diff --git a/chromium/tools/gn/command_format.cc b/chromium/tools/gn/command_format.cc
index 0849762550a..22543174cc3 100644
--- a/chromium/tools/gn/command_format.cc
+++ b/chromium/tools/gn/command_format.cc
@@ -336,7 +336,7 @@ void Printer::SortIfSourcesOrDeps(const BinaryOpNode* binop) {
binop->op().value() == "-=") &&
ident && list) {
const base::StringPiece lhs = ident->value().value();
- if (lhs == "sources")
+ if (lhs == "public" || lhs == "sources")
const_cast<ListNode*>(list)->SortAsStringsList();
else if (lhs == "deps" || lhs == "public_deps")
const_cast<ListNode*>(list)->SortAsDepsList();
diff --git a/chromium/tools/gn/command_gen.cc b/chromium/tools/gn/command_gen.cc
index ce3fa286104..9c08a84b4a2 100644
--- a/chromium/tools/gn/command_gen.cc
+++ b/chromium/tools/gn/command_gen.cc
@@ -196,11 +196,11 @@ bool RunIdeWriter(const std::string& ide,
return res;
} else if (ide == kSwitchIdeValueVs || ide == kSwitchIdeValueVs2013 ||
ide == kSwitchIdeValueVs2015 || ide == kSwitchIdeValueVs2017) {
- VisualStudioWriter::Version version = VisualStudioWriter::Version::Vs2015;
+ VisualStudioWriter::Version version = VisualStudioWriter::Version::Vs2017;
if (ide == kSwitchIdeValueVs2013)
version = VisualStudioWriter::Version::Vs2013;
- else if (ide == kSwitchIdeValueVs2017)
- version = VisualStudioWriter::Version::Vs2017;
+ else if (ide == kSwitchIdeValueVs2015)
+ version = VisualStudioWriter::Version::Vs2015;
std::string sln_name;
if (command_line->HasSwitch(kSwitchSln))
diff --git a/chromium/tools/gn/create_bundle_target_generator.cc b/chromium/tools/gn/create_bundle_target_generator.cc
index 22c6cd67cc5..97973213b77 100644
--- a/chromium/tools/gn/create_bundle_target_generator.cc
+++ b/chromium/tools/gn/create_bundle_target_generator.cc
@@ -33,6 +33,9 @@ void CreateBundleTargetGenerator::DoRun() {
if (!FillBundleDir(SourceDir(), variables::kBundleRootDir,
&bundle_data.root_dir()))
return;
+ if (!FillBundleDir(bundle_data.root_dir(), variables::kBundleContentsDir,
+ &bundle_data.contents_dir()))
+ return;
if (!FillBundleDir(bundle_data.root_dir(), variables::kBundleResourcesDir,
&bundle_data.resources_dir()))
return;
@@ -49,6 +52,9 @@ void CreateBundleTargetGenerator::DoRun() {
if (!FillProductType())
return;
+ if (!FillPartialInfoPlist())
+ return;
+
if (!FillXcodeTestApplicationName())
return;
@@ -137,6 +143,29 @@ bool CreateBundleTargetGenerator::FillProductType() {
return true;
}
+bool CreateBundleTargetGenerator::FillPartialInfoPlist() {
+ const Value* value = scope_->GetValue(variables::kPartialInfoPlist, true);
+ if (!value)
+ return true;
+
+ if (!value->VerifyTypeIs(Value::STRING, err_))
+ return false;
+
+ const BuildSettings* build_settings = scope_->settings()->build_settings();
+ SourceFile path = scope_->GetSourceDir().ResolveRelativeFile(
+ *value, err_, build_settings->root_path_utf8());
+
+ if (err_->has_error())
+ return false;
+
+ if (!EnsureStringIsInOutputDir(build_settings->build_dir(), path.value(),
+ value->origin(), err_))
+ return false;
+
+ target_->bundle_data().set_partial_info_plist(path);
+ return true;
+}
+
bool CreateBundleTargetGenerator::FillXcodeTestApplicationName() {
const Value* value =
scope_->GetValue(variables::kXcodeTestApplicationName, true);
diff --git a/chromium/tools/gn/create_bundle_target_generator.h b/chromium/tools/gn/create_bundle_target_generator.h
index bf2ad2f735b..a7f1edca148 100644
--- a/chromium/tools/gn/create_bundle_target_generator.h
+++ b/chromium/tools/gn/create_bundle_target_generator.h
@@ -30,6 +30,7 @@ class CreateBundleTargetGenerator : public TargetGenerator {
bool FillXcodeExtraAttributes();
bool FillProductType();
+ bool FillPartialInfoPlist();
bool FillXcodeTestApplicationName();
bool FillCodeSigningScript();
diff --git a/chromium/tools/gn/deps_iterator.h b/chromium/tools/gn/deps_iterator.h
index 28062ae2016..c3f2c42c7dc 100644
--- a/chromium/tools/gn/deps_iterator.h
+++ b/chromium/tools/gn/deps_iterator.h
@@ -35,7 +35,7 @@ class DepsIterator {
DepsIterator& operator++();
// Comparison for STL-based loops.
- bool operator!=(const DepsIterator& other) {
+ bool operator!=(const DepsIterator& other) const {
return current_index_ != other.current_index_ ||
vect_stack_[0] != other.vect_stack_[0] ||
vect_stack_[1] != other.vect_stack_[1] ||
diff --git a/chromium/tools/gn/desc_builder.cc b/chromium/tools/gn/desc_builder.cc
index c06317f653a..38b78e8578f 100644
--- a/chromium/tools/gn/desc_builder.cc
+++ b/chromium/tools/gn/desc_builder.cc
@@ -580,6 +580,8 @@ class TargetDescBuilder : public BaseDescBuilder {
data->SetWithoutPathExpansion("plugins_dir",
RenderValue(bundle_data.plugins_dir()));
data->SetKey("product_type", base::Value(bundle_data.product_type()));
+ data->SetWithoutPathExpansion(
+ "partial_info_plist", RenderValue(bundle_data.partial_info_plist()));
auto deps = base::MakeUnique<base::ListValue>();
for (const auto* dep : bundle_data.bundle_deps())
diff --git a/chromium/tools/gn/docs/reference.md b/chromium/tools/gn/docs/reference.md
index 981b6f4574d..d5fe229aa9c 100644
--- a/chromium/tools/gn/docs/reference.md
+++ b/chromium/tools/gn/docs/reference.md
@@ -19,9 +19,9 @@
* [Target declarations](#targets)
* [action: Declare a target that runs a script a single time.](#action)
* [action_foreach: Declare a target that runs a script over a set of files.](#action_foreach)
- * [bundle_data: [iOS/OS X] Declare a target without output.](#bundle_data)
+ * [bundle_data: [iOS/macOS] Declare a target without output.](#bundle_data)
* [copy: Declare a target that copies files.](#copy)
- * [create_bundle: [iOS/OS X] Build an OS X / iOS bundle.](#create_bundle)
+ * [create_bundle: [iOS/macOS] Build an iOS or macOS bundle.](#create_bundle)
* [executable: Declare an executable target.](#executable)
* [group: Declare a named group of targets.](#group)
* [loadable_module: Declare a loadable module target.](#loadable_module)
@@ -80,6 +80,7 @@
* [args: [string list] Arguments passed to an action.](#args)
* [asmflags: [string list] Flags passed to the assembler.](#asmflags)
* [assert_no_deps: [label pattern list] Ensure no deps on these targets.](#assert_no_deps)
+ * [bundle_contents_dir: Expansion of {{bundle_contents_dir}} in create_bundle.](#bundle_contents_dir)
* [bundle_deps_filter: [label list] A list of labels that are filtered out.](#bundle_deps_filter)
* [bundle_executable_dir: Expansion of {{bundle_executable_dir}} in create_bundle](#bundle_executable_dir)
* [bundle_plugins_dir: Expansion of {{bundle_plugins_dir}} in create_bundle.](#bundle_plugins_dir)
@@ -112,6 +113,7 @@
* [output_name: [string] Name for the output file other than the default.](#output_name)
* [output_prefix_override: [boolean] Don't use prefix for output name.](#output_prefix_override)
* [outputs: [file list] Output files for actions and copy targets.](#outputs)
+ * [partial_info_plist: [filename] Path plist from asset catalog compiler.](#partial_info_plist)
* [pool: [string] Label of the pool used by the action.](#pool)
* [precompiled_header: [string] Header file to precompile.](#precompiled_header)
* [precompiled_header_type: [string] "gcc" or "msvc".](#precompiled_header_type)
@@ -1157,7 +1159,7 @@
"/{{source_name_part}}.h" ]
}
```
-### <a name="bundle_data"></a>**bundle_data**: [iOS/OS X] Declare a target without output.
+### <a name="bundle_data"></a>**bundle_data**: [iOS/macOS] Declare a target without output.
```
This target type allows to declare data that is required at runtime. It is
@@ -1169,8 +1171,8 @@
output. The output must reference a file inside of {{bundle_root_dir}}.
This target can be used on all platforms though it is designed only to
- generate iOS/OS X bundle. In cross-platform projects, it is advised to put it
- behind iOS/Mac conditionals.
+ generate iOS/macOS bundle. In cross-platform projects, it is advised to put it
+ behind iOS/macOS conditionals.
See "gn help create_bundle" for more information.
```
@@ -1248,10 +1250,10 @@
outputs = [ "$target_gen_dir/{{source_file_part}}" ]
}
```
-### <a name="create_bundle"></a>**create_bundle**: [iOS/OS X] Build an OS X / iOS bundle.
+### <a name="create_bundle"></a>**create_bundle**: [ios/macOS] Build an iOS or macOS bundle.
```
- This target generates an iOS/OS X bundle (which is a directory with a
+ This target generates an iOS or macOS bundle (which is a directory with a
well-know structure). This target does not define any sources, instead they
are computed from all "bundle_data" target this one depends on transitively
(the recursion stops at "create_bundle" targets).
@@ -1260,8 +1262,8 @@
expansion of {{bundle_*_dir}} rules in "bundle_data" outputs.
This target can be used on all platforms though it is designed only to
- generate iOS/OS X bundle. In cross-platform projects, it is advised to put it
- behind iOS/Mac conditionals.
+ generate iOS or macOS bundle. In cross-platform projects, it is advised to put
+ it behind iOS/macOS conditionals.
If a create_bundle is specified as a data_deps for another target, the bundle
is considered a leaf, and its public and private dependencies will not
@@ -1291,11 +1293,11 @@
#### **Variables**
```
- bundle_root_dir*, bundle_resources_dir*, bundle_executable_dir*,
- bundle_plugins_dir*, bundle_deps_filter, deps, data_deps, public_deps,
- visibility, product_type, code_signing_args, code_signing_script,
- code_signing_sources, code_signing_outputs, xcode_extra_attributes,
- xcode_test_application_name
+ bundle_root_dir*, bundle_contents_dir*, bundle_resources_dir*,
+ bundle_executable_dir*, bundle_plugins_dir*, bundle_deps_filter, deps,
+ data_deps, public_deps, visibility, product_type, code_signing_args,
+ code_signing_script, code_signing_sources, code_signing_outputs,
+ xcode_extra_attributes, xcode_test_application_name, partial_info_plist
* = required
```
@@ -1303,7 +1305,7 @@
```
# Defines a template to create an application. On most platform, this is just
- # an alias for an "executable" target, but on iOS/OS X, it builds an
+ # an alias for an "executable" target, but on iOS/macOS, it builds an
# application bundle.
template("app") {
if (!is_ios && !is_mac) {
@@ -1325,7 +1327,7 @@
bundle_data("${app_name}_bundle_info_plist") {
deps = [ ":${app_name}_generate_info_plist" ]
sources = [ "$gen_path/Info.plist" ]
- outputs = [ "{{bundle_root_dir}}/Info.plist" ]
+ outputs = [ "{{bundle_contents_dir}}/Info.plist" ]
}
executable("${app_name}_generate_executable") {
@@ -1353,19 +1355,21 @@
if (is_ios) {
bundle_root_dir = "${root_build_dir}/$target_name"
- bundle_resources_dir = bundle_root_dir
- bundle_executable_dir = bundle_root_dir
- bundle_plugins_dir = bundle_root_dir + "/Plugins"
+ bundle_contents_dir = bundle_root_dir
+ bundle_resources_dir = bundle_contents_dir
+ bundle_executable_dir = bundle_contents_dir
+ bundle_plugins_dir = "${bundle_contents_dir}/Plugins"
extra_attributes = {
ONLY_ACTIVE_ARCH = "YES"
DEBUG_INFORMATION_FORMAT = "dwarf"
}
} else {
- bundle_root_dir = "${root_build_dir}/target_name/Contents"
- bundle_resources_dir = bundle_root_dir + "/Resources"
- bundle_executable_dir = bundle_root_dir + "/MacOS"
- bundle_plugins_dir = bundle_root_dir + "/Plugins"
+ bundle_root_dir = "${root_build_dir}/target_name"
+ bundle_contents_dir = "${bundle_root_dir}/Contents"
+ bundle_resources_dir = "${bundle_contents_dir}/Resources"
+ bundle_executable_dir = "${bundle_contents_dir}/MacOS"
+ bundle_plugins_dir = "${bundle_contents_dir}/Plugins"
}
deps = [ ":${app_name}_bundle_info_plist" ]
if (is_ios && code_signing) {
@@ -2750,8 +2754,8 @@
"action": Defaults for actions
Platform specific tools:
- "copy_bundle_data": [iOS, OS X] Tool to copy files in a bundle.
- "compile_xcassets": [iOS, OS X] Tool to compile asset catalogs.
+ "copy_bundle_data": [iOS, macOS] Tool to copy files in a bundle.
+ "compile_xcassets": [iOS, macOS] Tool to compile asset catalogs.
```
#### **Tool variables**
@@ -3104,7 +3108,7 @@
common tool substitutions.
The copy_bundle_data and compile_xcassets tools only allows the common tool
- substitutions. Both tools are required to create iOS/OS X bundles and need
+ substitutions. Both tools are required to create iOS/macOS bundles and need
only be defined on those platforms.
The copy_bundle_data tool will be called with one source and needs to copy
@@ -3123,6 +3127,11 @@
Expands to the product_type of the bundle that will contain the
compiled asset catalog. Usually corresponds to the product_type
property of the corresponding create_bundle target.
+
+ {{bundle_partial_info_plist}}
+ Expands to the path to the partial Info.plist generated by the
+ assets catalog compiler. Usually based on the target_name of
+ the create_bundle target.
```
#### **Separate linking and dependencies for shared libraries**
@@ -3895,6 +3904,18 @@
]
}
```
+### <a name="bundle_contents_dir"></a>**bundle_contents_dir**: Expansion of {{bundle_contents_dir}} in
+```
+ create_bundle.
+
+ A string corresponding to a path in $root_build_dir.
+
+ This string is used by the "create_bundle" target to expand the
+ {{bundle_contents_dir}} of the "bundle_data" target it depends on. This must
+ correspond to a path under "bundle_root_dir".
+
+ See "gn help bundle_root_dir" for examples.
+```
### <a name="bundle_deps_filter"></a>**bundle_deps_filter**: [label list] A list of labels that are filtered out.
```
@@ -3976,15 +3997,16 @@
```
bundle_data("info_plist") {
sources = [ "Info.plist" ]
- outputs = [ "{{bundle_root_dir}}/Info.plist" ]
+ outputs = [ "{{bundle_contents_dir}}/Info.plist" ]
}
create_bundle("doom_melon.app") {
deps = [ ":info_plist" ]
- bundle_root_dir = root_build_dir + "/doom_melon.app/Contents"
- bundle_resources_dir = bundle_root_dir + "/Resources"
- bundle_executable_dir = bundle_root_dir + "/MacOS"
- bundle_plugins_dir = bundle_root_dir + "/PlugIns"
+ bundle_root_dir = "${root_build_dir}/doom_melon.app"
+ bundle_contents_dir = "${bundle_root_dir}/Contents"
+ bundle_resources_dir = "${bundle_contents_dir}/Resources"
+ bundle_executable_dir = "${bundle_contents_dir}/MacOS"
+ bundle_plugins_dir = "${bundle_contents_dir}/PlugIns"
}
```
### <a name="cflags*"></a>**cflags***: Flags passed to the C compiler.
@@ -4366,7 +4388,7 @@
However, no verification is done on these so GN doesn't enforce this. The
paths are just rebased and passed along when requested.
- Note: On iOS and OS X, create_bundle targets will not be recursed into when
+ Note: On iOS and macOS, create_bundle targets will not be recursed into when
gathering data. See "gn help create_bundle" for details.
See "gn help runtime_deps" for how these are used.
@@ -4383,7 +4405,7 @@
This is normally used for things like plugins or helper programs that a
target needs at runtime.
- Note: On iOS and OS X, create_bundle targets will not be recursed into when
+ Note: On iOS and macOS, create_bundle targets will not be recursed into when
gathering data_deps. See "gn help create_bundle" for details.
See also "gn help deps" and "gn help data".
@@ -4887,6 +4909,16 @@
Action targets (excluding action_foreach) must list literal output file(s)
with no source expansions. See "gn help action".
```
+### <a name="partial_info_plist"></a>**partial_info_plist**: [filename] Path plist from asset catalog compiler.
+
+```
+ Valid for create_bundle target, corresponds to the path for the partial
+ Info.plist created by the asset catalog compiler that needs to be merged
+ with the application Info.plist (usually done by the code signing script).
+
+ The file will be generated regardless of whether the asset compiler has
+ been invoked or not. See "gn help create_bundle".
+```
### <a name="pool"></a>**pool**: Label of the pool used by the action.
```
diff --git a/chromium/tools/gn/format_test_data/062.gn b/chromium/tools/gn/format_test_data/062.gn
index d7fbb3cc7f1..8c4cc7d8423 100644
--- a/chromium/tools/gn/format_test_data/062.gn
+++ b/chromium/tools/gn/format_test_data/062.gn
@@ -110,3 +110,13 @@ sources += [
"srtp/crypto/rng/prng.c",
"srtp/crypto/rng/rand_source.c",
]
+
+# Try "public" too. It should be treated the same.
+public = [
+ # Let's sort
+ "this", "into", "word", "salad",
+
+ # But leave
+ "these", "two"
+ # alone!
+]
diff --git a/chromium/tools/gn/format_test_data/062.golden b/chromium/tools/gn/format_test_data/062.golden
index e939e449a05..b55451011f7 100644
--- a/chromium/tools/gn/format_test_data/062.golden
+++ b/chromium/tools/gn/format_test_data/062.golden
@@ -115,3 +115,18 @@ sources += [
"srtp/srtp/ekt.c",
"srtp/srtp/srtp.c",
]
+
+# Try "public" too. It should be treated the same.
+public = [
+ # Let's sort
+ "into",
+ "salad",
+ "this",
+ "word",
+
+ # But leave
+ "these",
+ "two",
+
+ # alone!
+]
diff --git a/chromium/tools/gn/function_toolchain.cc b/chromium/tools/gn/function_toolchain.cc
index d4cd6e1ec1e..2d80273e056 100644
--- a/chromium/tools/gn/function_toolchain.cc
+++ b/chromium/tools/gn/function_toolchain.cc
@@ -534,8 +534,8 @@ Tool types
"action": Defaults for actions
Platform specific tools:
- "copy_bundle_data": [iOS, OS X] Tool to copy files in a bundle.
- "compile_xcassets": [iOS, OS X] Tool to compile asset catalogs.
+ "copy_bundle_data": [iOS, macOS] Tool to copy files in a bundle.
+ "compile_xcassets": [iOS, macOS] Tool to compile asset catalogs.
Tool variables
@@ -886,7 +886,7 @@ R"( The static library ("alink") tool allows {{arflags}} plus the common tool
common tool substitutions.
The copy_bundle_data and compile_xcassets tools only allows the common tool
- substitutions. Both tools are required to create iOS/OS X bundles and need
+ substitutions. Both tools are required to create iOS/macOS bundles and need
only be defined on those platforms.
The copy_bundle_data tool will be called with one source and needs to copy
@@ -906,6 +906,11 @@ R"( The static library ("alink") tool allows {{arflags}} plus the common tool
compiled asset catalog. Usually corresponds to the product_type
property of the corresponding create_bundle target.
+ {{bundle_partial_info_plist}}
+ Expands to the path to the partial Info.plist generated by the
+ assets catalog compiler. Usually based on the target_name of
+ the create_bundle target.
+
Separate linking and dependencies for shared libraries
Shared libraries are special in that not all changes to them require that
diff --git a/chromium/tools/gn/functions.cc b/chromium/tools/gn/functions.cc
index 540c1da7bdd..4e469ac621e 100644
--- a/chromium/tools/gn/functions.cc
+++ b/chromium/tools/gn/functions.cc
@@ -668,7 +668,7 @@ Value RunNotNeeded(Scope* scope,
const ListNode* args_list,
Err* err) {
const auto& args_vector = args_list->contents();
- if (args_vector.size() < 1 && args_vector.size() > 3) {
+ if (args_vector.size() < 1 || args_vector.size() > 3) {
*err = Err(function, "Wrong number of arguments.",
"Expecting one, two or three arguments.");
return Value();
diff --git a/chromium/tools/gn/functions_target.cc b/chromium/tools/gn/functions_target.cc
index f4369d63a75..85b2a267945 100644
--- a/chromium/tools/gn/functions_target.cc
+++ b/chromium/tools/gn/functions_target.cc
@@ -253,9 +253,9 @@ Value RunActionForEach(Scope* scope,
const char kBundleData[] = "bundle_data";
const char kBundleData_HelpShort[] =
- "bundle_data: [iOS/OS X] Declare a target without output.";
+ "bundle_data: [iOS/macOS] Declare a target without output.";
const char kBundleData_Help[] =
- R"(bundle_data: [iOS/OS X] Declare a target without output.
+ R"(bundle_data: [iOS/macOS] Declare a target without output.
This target type allows to declare data that is required at runtime. It is
used to inform "create_bundle" targets of the files to copy into generated
@@ -266,8 +266,8 @@ const char kBundleData_Help[] =
output. The output must reference a file inside of {{bundle_root_dir}}.
This target can be used on all platforms though it is designed only to
- generate iOS/OS X bundle. In cross-platform projects, it is advised to put it
- behind iOS/Mac conditionals.
+ generate iOS/macOS bundle. In cross-platform projects, it is advised to put it
+ behind iOS/macOS conditionals.
See "gn help create_bundle" for more information.
@@ -318,11 +318,11 @@ Value RunBundleData(Scope* scope,
const char kCreateBundle[] = "create_bundle";
const char kCreateBundle_HelpShort[] =
- "create_bundle: [iOS/OS X] Build an OS X / iOS bundle.";
+ "create_bundle: [iOS/macOS] Build an iOS or macOS bundle.";
const char kCreateBundle_Help[] =
- R"(create_bundle: [iOS/OS X] Build an OS X / iOS bundle.
+ R"(create_bundle: [ios/macOS] Build an iOS or macOS bundle.
- This target generates an iOS/OS X bundle (which is a directory with a
+ This target generates an iOS or macOS bundle (which is a directory with a
well-know structure). This target does not define any sources, instead they
are computed from all "bundle_data" target this one depends on transitively
(the recursion stops at "create_bundle" targets).
@@ -331,8 +331,8 @@ const char kCreateBundle_Help[] =
expansion of {{bundle_*_dir}} rules in "bundle_data" outputs.
This target can be used on all platforms though it is designed only to
- generate iOS/OS X bundle. In cross-platform projects, it is advised to put it
- behind iOS/Mac conditionals.
+ generate iOS or macOS bundle. In cross-platform projects, it is advised to put
+ it behind iOS/macOS conditionals.
If a create_bundle is specified as a data_deps for another target, the bundle
is considered a leaf, and its public and private dependencies will not
@@ -358,17 +358,17 @@ Code signing
Variables
- bundle_root_dir*, bundle_resources_dir*, bundle_executable_dir*,
- bundle_plugins_dir*, bundle_deps_filter, deps, data_deps, public_deps,
- visibility, product_type, code_signing_args, code_signing_script,
- code_signing_sources, code_signing_outputs, xcode_extra_attributes,
- xcode_test_application_name
+ bundle_root_dir*, bundle_contents_dir*, bundle_resources_dir*,
+ bundle_executable_dir*, bundle_plugins_dir*, bundle_deps_filter, deps,
+ data_deps, public_deps, visibility, product_type, code_signing_args,
+ code_signing_script, code_signing_sources, code_signing_outputs,
+ xcode_extra_attributes, xcode_test_application_name, partial_info_plist
* = required
Example
# Defines a template to create an application. On most platform, this is just
- # an alias for an "executable" target, but on iOS/OS X, it builds an
+ # an alias for an "executable" target, but on iOS/macOS, it builds an
# application bundle.
template("app") {
if (!is_ios && !is_mac) {
@@ -390,7 +390,7 @@ Example
bundle_data("${app_name}_bundle_info_plist") {
deps = [ ":${app_name}_generate_info_plist" ]
sources = [ "$gen_path/Info.plist" ]
- outputs = [ "{{bundle_root_dir}}/Info.plist" ]
+ outputs = [ "{{bundle_contents_dir}}/Info.plist" ]
}
executable("${app_name}_generate_executable") {
@@ -418,19 +418,21 @@ Example
if (is_ios) {
bundle_root_dir = "${root_build_dir}/$target_name"
- bundle_resources_dir = bundle_root_dir
- bundle_executable_dir = bundle_root_dir
- bundle_plugins_dir = bundle_root_dir + "/Plugins"
+ bundle_contents_dir = bundle_root_dir
+ bundle_resources_dir = bundle_contents_dir
+ bundle_executable_dir = bundle_contents_dir
+ bundle_plugins_dir = "${bundle_contents_dir}/Plugins"
extra_attributes = {
ONLY_ACTIVE_ARCH = "YES"
DEBUG_INFORMATION_FORMAT = "dwarf"
}
} else {
- bundle_root_dir = "${root_build_dir}/target_name/Contents"
- bundle_resources_dir = bundle_root_dir + "/Resources"
- bundle_executable_dir = bundle_root_dir + "/MacOS"
- bundle_plugins_dir = bundle_root_dir + "/Plugins"
+ bundle_root_dir = "${root_build_dir}/target_name"
+ bundle_contents_dir = "${bundle_root_dir}/Contents"
+ bundle_resources_dir = "${bundle_contents_dir}/Resources"
+ bundle_executable_dir = "${bundle_contents_dir}/MacOS"
+ bundle_plugins_dir = "${bundle_contents_dir}/Plugins"
}
deps = [ ":${app_name}_bundle_info_plist" ]
if (is_ios && code_signing) {
diff --git a/chromium/tools/gn/header_checker.cc b/chromium/tools/gn/header_checker.cc
index dd8dc0eef82..2bfaec5a485 100644
--- a/chromium/tools/gn/header_checker.cc
+++ b/chromium/tools/gn/header_checker.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "base/bind.h"
+#include "base/containers/queue.h"
#include "base/files/file_util.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
@@ -480,7 +481,7 @@ bool HeaderChecker::IsDependencyOf(const Target* search_for,
// search_for.
std::map<const Target*, ChainLink> breadcrumbs;
- std::queue<ChainLink> work_queue;
+ base::queue<ChainLink> work_queue;
work_queue.push(ChainLink(search_from, true));
bool first_time = true;
diff --git a/chromium/tools/gn/misc/emacs/gn-mode.el b/chromium/tools/gn/misc/emacs/gn-mode.el
index 931207ba164..4474bde3044 100644
--- a/chromium/tools/gn/misc/emacs/gn-mode.el
+++ b/chromium/tools/gn/misc/emacs/gn-mode.el
@@ -92,7 +92,7 @@ variable name or the '{{' and '}}' which surround it."
"output_prefix_override" "outputs" "pool" "precompiled_header"
"precompiled_header_type" "precompiled_source" "product_type" "public"
"public_configs" "public_deps" "response_file_contents" "script" "sources"
- "testonly" "visibility" "write_runtime_deps"))
+ "testonly" "visibility" "write_runtime_deps" "bundle_contents_dir"))
(defconst gn-font-lock-keywords
`((,(regexp-opt gn-font-lock-reserved-keywords 'words) .
diff --git a/chromium/tools/gn/ninja_create_bundle_target_writer.cc b/chromium/tools/gn/ninja_create_bundle_target_writer.cc
index 77e05b20e28..038d5260b14 100644
--- a/chromium/tools/gn/ninja_create_bundle_target_writer.cc
+++ b/chromium/tools/gn/ninja_create_bundle_target_writer.cc
@@ -138,19 +138,56 @@ void NinjaCreateBundleTargetWriter::WriteCopyBundleFileRuleSteps(
void NinjaCreateBundleTargetWriter::WriteCompileAssetsCatalogStep(
std::vector<OutputFile>* output_files) {
- if (target_->bundle_data().assets_catalog_sources().empty())
+ if (target_->bundle_data().assets_catalog_sources().empty() &&
+ target_->bundle_data().partial_info_plist().is_null())
return;
+ OutputFile compiled_catalog;
+ if (!target_->bundle_data().assets_catalog_sources().empty()) {
+ compiled_catalog =
+ OutputFile(settings_->build_settings(),
+ target_->bundle_data().GetCompiledAssetCatalogPath());
+ output_files->push_back(compiled_catalog);
+ }
+
+ OutputFile partial_info_plist;
+ if (!target_->bundle_data().partial_info_plist().is_null()) {
+ partial_info_plist =
+ OutputFile(settings_->build_settings(),
+ target_->bundle_data().partial_info_plist());
+
+ output_files->push_back(partial_info_plist);
+ }
+
+ // If there are no asset catalog to compile but the "partial_info_plist" is
+ // non-empty, then add a target to generate an empty file (to avoid breaking
+ // code that depends on this file existence).
+ if (target_->bundle_data().assets_catalog_sources().empty()) {
+ DCHECK(!target_->bundle_data().partial_info_plist().is_null());
+
+ out_ << "build ";
+ path_output_.WriteFile(out_, partial_info_plist);
+ out_ << ": " << GetNinjaRulePrefixForToolchain(settings_)
+ << Toolchain::ToolTypeToName(Toolchain::TYPE_STAMP) << std::endl;
+
+ return;
+ }
+
OutputFile input_dep = WriteCompileAssetsCatalogInputDepsStamp(
target_->bundle_data().assets_catalog_deps());
DCHECK(!input_dep.value().empty());
- OutputFile output_file(settings_->build_settings(),
- target_->bundle_data().GetCompiledAssetCatalogPath());
- output_files->push_back(output_file);
-
out_ << "build ";
- path_output_.WriteFile(out_, output_file);
+ path_output_.WriteFile(out_, compiled_catalog);
+ if (partial_info_plist != OutputFile()) {
+ // If "partial_info_plist" is non-empty, then add it to list of implicit
+ // outputs of the asset catalog compilation, so that target can use it
+ // without getting the ninja error "'foo', needed by 'bar', missing and
+ // no known rule to make it".
+ out_ << " | ";
+ path_output_.WriteFile(out_, partial_info_plist);
+ }
+
out_ << ": " << GetNinjaRulePrefixForToolchain(settings_)
<< Toolchain::ToolTypeToName(Toolchain::TYPE_COMPILE_XCASSETS);
@@ -167,6 +204,12 @@ void NinjaCreateBundleTargetWriter::WriteCompileAssetsCatalogStep(
out_ << " product_type = " << target_->bundle_data().product_type()
<< std::endl;
+
+ if (partial_info_plist != OutputFile()) {
+ out_ << " partial_info_plist = ";
+ path_output_.WriteFile(out_, partial_info_plist);
+ out_ << std::endl;
+ }
}
OutputFile
diff --git a/chromium/tools/gn/ninja_create_bundle_target_writer_unittest.cc b/chromium/tools/gn/ninja_create_bundle_target_writer_unittest.cc
index 15885baf739..06fbd516410 100644
--- a/chromium/tools/gn/ninja_create_bundle_target_writer_unittest.cc
+++ b/chromium/tools/gn/ninja_create_bundle_target_writer_unittest.cc
@@ -14,11 +14,15 @@
namespace {
void SetupBundleDataDir(BundleData* bundle_data, const std::string& root_dir) {
- std::string bundle_root_dir = root_dir + "/bar.bundle/Contents";
+ std::string bundle_root_dir = root_dir + "/bar.bundle";
bundle_data->root_dir() = SourceDir(bundle_root_dir);
- bundle_data->resources_dir() = SourceDir(bundle_root_dir + "/Resources");
- bundle_data->executable_dir() = SourceDir(bundle_root_dir + "/MacOS");
- bundle_data->plugins_dir() = SourceDir(bundle_root_dir + "/Plug Ins");
+ bundle_data->contents_dir() = SourceDir(bundle_root_dir + "/Contents");
+ bundle_data->resources_dir() =
+ SourceDir(bundle_data->contents_dir().value() + "/Resources");
+ bundle_data->executable_dir() =
+ SourceDir(bundle_data->contents_dir().value() + "/MacOS");
+ bundle_data->plugins_dir() =
+ SourceDir(bundle_data->contents_dir().value() + "/Plug Ins");
}
} // namespace
@@ -65,6 +69,79 @@ TEST(NinjaCreateBundleTargetWriter, Run) {
EXPECT_EQ(expected, out_str);
}
+// Tests creating a bundle in a sub-directory of $root_out_dir.
+TEST(NinjaCreateBundleTargetWriter, InSubDirectory) {
+ Err err;
+ TestWithScope setup;
+
+ Target bundle_data(setup.settings(), Label(SourceDir("//foo/"), "data"));
+ bundle_data.set_output_type(Target::BUNDLE_DATA);
+ bundle_data.sources().push_back(SourceFile("//foo/input1.txt"));
+ bundle_data.sources().push_back(SourceFile("//foo/input2.txt"));
+ bundle_data.action_values().outputs() = SubstitutionList::MakeForTest(
+ "{{bundle_resources_dir}}/{{source_file_part}}");
+ bundle_data.SetToolchain(setup.toolchain());
+ bundle_data.visibility().SetPublic();
+ ASSERT_TRUE(bundle_data.OnResolved(&err));
+
+ Target create_bundle(
+ setup.settings(),
+ Label(SourceDir("//baz/"), "bar", setup.toolchain()->label().dir(),
+ setup.toolchain()->label().name()));
+ SetupBundleDataDir(&create_bundle.bundle_data(), "//out/Debug/gen");
+ create_bundle.set_output_type(Target::CREATE_BUNDLE);
+ create_bundle.private_deps().push_back(LabelTargetPair(&bundle_data));
+ create_bundle.SetToolchain(setup.toolchain());
+ ASSERT_TRUE(create_bundle.OnResolved(&err));
+
+ std::ostringstream out;
+ NinjaCreateBundleTargetWriter writer(&create_bundle, out);
+ writer.Run();
+
+ const char expected[] =
+ "build gen/bar.bundle/Contents/Resources/input1.txt: copy_bundle_data "
+ "../../foo/input1.txt\n"
+ "build gen/bar.bundle/Contents/Resources/input2.txt: copy_bundle_data "
+ "../../foo/input2.txt\n"
+ "build obj/baz/bar.stamp: stamp "
+ "gen/bar.bundle/Contents/Resources/input1.txt "
+ "gen/bar.bundle/Contents/Resources/input2.txt\n"
+ "build gen/bar.bundle: phony obj/baz/bar.stamp\n";
+ std::string out_str = out.str();
+ EXPECT_EQ(expected, out_str);
+}
+
+// Tests empty asset catalog with partial_info_plist property defined.
+TEST(NinjaCreateBundleTargetWriter, JustPartialInfoPlist) {
+ Err err;
+ TestWithScope setup;
+
+ Target create_bundle(
+ setup.settings(),
+ Label(SourceDir("//baz/"), "bar", setup.toolchain()->label().dir(),
+ setup.toolchain()->label().name()));
+ SetupBundleDataDir(&create_bundle.bundle_data(), "//out/Debug");
+ create_bundle.set_output_type(Target::CREATE_BUNDLE);
+ create_bundle.bundle_data().product_type().assign("com.apple.product-type");
+ create_bundle.bundle_data().set_partial_info_plist(
+ SourceFile("//out/Debug/baz/bar/bar_partial_info.plist"));
+ create_bundle.SetToolchain(setup.toolchain());
+ ASSERT_TRUE(create_bundle.OnResolved(&err));
+
+ std::ostringstream out;
+ NinjaCreateBundleTargetWriter writer(&create_bundle, out);
+ writer.Run();
+
+ const char expected[] =
+ "build baz/bar/bar_partial_info.plist: stamp\n"
+ "build obj/baz/bar.stamp: stamp "
+ "baz/bar/bar_partial_info.plist\n"
+ "build bar.bundle: phony obj/baz/bar.stamp\n";
+ std::string out_str = out.str();
+ EXPECT_EQ(expected, out_str);
+}
+
+
// Tests multiple files from asset catalog.
TEST(NinjaCreateBundleTargetWriter, AssetCatalog) {
Err err;
@@ -151,7 +228,7 @@ TEST(NinjaCreateBundleTargetWriter, Complex) {
bundle_data0.set_output_type(Target::BUNDLE_DATA);
bundle_data0.sources().push_back(SourceFile("//qux/qux-Info.plist"));
bundle_data0.action_values().outputs() =
- SubstitutionList::MakeForTest("{{bundle_root_dir}}/Info.plist");
+ SubstitutionList::MakeForTest("{{bundle_contents_dir}}/Info.plist");
bundle_data0.SetToolchain(setup.toolchain());
bundle_data0.visibility().SetPublic();
ASSERT_TRUE(bundle_data0.OnResolved(&err));
@@ -213,6 +290,8 @@ TEST(NinjaCreateBundleTargetWriter, Complex) {
create_bundle.private_deps().push_back(LabelTargetPair(&bundle_data2));
create_bundle.private_deps().push_back(LabelTargetPair(&bundle_data3));
create_bundle.bundle_data().product_type().assign("com.apple.product-type");
+ create_bundle.bundle_data().set_partial_info_plist(
+ SourceFile("//out/Debug/baz/bar/bar_partial_info.plist"));
create_bundle.SetToolchain(setup.toolchain());
ASSERT_TRUE(create_bundle.OnResolved(&err));
@@ -230,15 +309,18 @@ TEST(NinjaCreateBundleTargetWriter, Complex) {
"build obj/baz/bar.xcassets.inputdeps.stamp: stamp "
"obj/foo/assets.stamp "
"obj/quz/assets.stamp\n"
- "build bar.bundle/Contents/Resources/Assets.car: compile_xcassets "
+ "build bar.bundle/Contents/Resources/Assets.car | "
+ "baz/bar/bar_partial_info.plist: compile_xcassets "
"../../foo/Foo.xcassets "
"../../quz/Quz.xcassets | obj/baz/bar.xcassets.inputdeps.stamp\n"
" product_type = com.apple.product-type\n"
+ " partial_info_plist = baz/bar/bar_partial_info.plist\n"
"build obj/baz/bar.stamp: stamp "
"bar.bundle/Contents/Info.plist "
"bar.bundle/Contents/Resources/input1.txt "
"bar.bundle/Contents/Resources/input2.txt "
- "bar.bundle/Contents/Resources/Assets.car\n"
+ "bar.bundle/Contents/Resources/Assets.car "
+ "baz/bar/bar_partial_info.plist\n"
"build bar.bundle: phony obj/baz/bar.stamp\n";
std::string out_str = out.str();
EXPECT_EQ(expected, out_str);
diff --git a/chromium/tools/gn/pool.h b/chromium/tools/gn/pool.h
index 81a021db659..42a80789ec8 100644
--- a/chromium/tools/gn/pool.h
+++ b/chromium/tools/gn/pool.h
@@ -7,6 +7,7 @@
#include <string>
+#include "base/macros.h"
#include "tools/gn/item.h"
// Represents a named pool in the dependency graph.
@@ -18,9 +19,6 @@ class Pool : public Item {
using Item::Item;
~Pool() override;
- Pool(const Pool&) = delete;
- Pool& operator=(const Pool&) = delete;
-
// Item implementation.
Pool* AsPool() override;
const Pool* AsPool() const override;
@@ -36,6 +34,8 @@ class Pool : public Item {
std::string GetNinjaName(bool include_toolchain) const;
int64_t depth_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(Pool);
};
#endif // TOOLS_GN_POOL_H_
diff --git a/chromium/tools/gn/runtime_deps_unittest.cc b/chromium/tools/gn/runtime_deps_unittest.cc
index ed0eea4199e..5dc89eb5a18 100644
--- a/chromium/tools/gn/runtime_deps_unittest.cc
+++ b/chromium/tools/gn/runtime_deps_unittest.cc
@@ -350,10 +350,12 @@ TEST(RuntimeDeps, CreateBundle) {
Target bundle(setup.settings(), Label(source_dir, "bundle"));
InitTargetWithType(setup, &bundle, Target::CREATE_BUNDLE);
- const std::string root_dir(build_dir + "Bundle.framework/Versions/A/");
+ const std::string root_dir(build_dir + "Bundle.framework/");
+ const std::string contents_dir(root_dir + "Versions/A/");
bundle.bundle_data().root_dir() = SourceDir(root_dir);
- bundle.bundle_data().resources_dir() = SourceDir(root_dir + "Resources");
- bundle.bundle_data().executable_dir() = SourceDir(root_dir + "MacOS");
+ bundle.bundle_data().contents_dir() = SourceDir(contents_dir);
+ bundle.bundle_data().resources_dir() = SourceDir(contents_dir + "Resources");
+ bundle.bundle_data().executable_dir() = SourceDir(contents_dir + "MacOS");
bundle.private_deps().push_back(LabelTargetPair(&dylib_data));
bundle.private_deps().push_back(LabelTargetPair(&module_data));
bundle.data_deps().push_back(LabelTargetPair(&data_dep));
diff --git a/chromium/tools/gn/substitution_type.cc b/chromium/tools/gn/substitution_type.cc
index 99b4e6802df..14bdcdc8dfc 100644
--- a/chromium/tools/gn/substitution_type.cc
+++ b/chromium/tools/gn/substitution_type.cc
@@ -10,53 +10,55 @@
#include "tools/gn/err.h"
const char* kSubstitutionNames[SUBSTITUTION_NUM_TYPES] = {
- "<<literal>>", // SUBSTITUTION_LITERAL
-
- "{{source}}", // SUBSTITUTION_SOURCE
- "{{output}}", // SUBSTITUTION_OUTPUT
-
- "{{source_name_part}}", // SUBSTITUTION_NAME_PART
- "{{source_file_part}}", // SUBSTITUTION_FILE_PART
- "{{source_dir}}", // SUBSTITUTION_SOURCE_DIR
- "{{source_root_relative_dir}}", // SUBSTITUTION_SOURCE_ROOT_RELATIVE_DIR
- "{{source_gen_dir}}", // SUBSTITUTION_SOURCE_GEN_DIR
- "{{source_out_dir}}", // SUBSTITUTION_SOURCE_OUT_DIR
- "{{source_target_relative}}", // SUBSTITUTION_SOURCE_TARGET_RELATIVE
-
- "{{label}}", // SUBSTITUTION_LABEL
- "{{label_name}}", // SUBSTITUTION_LABEL_NAME
- "{{root_gen_dir}}", // SUBSTITUTION_ROOT_GEN_DIR
- "{{root_out_dir}}", // SUBSTITUTION_ROOT_OUT_DIR
- "{{target_gen_dir}}", // SUBSTITUTION_TARGET_GEN_DIR
- "{{target_out_dir}}", // SUBSTITUTION_TARGET_OUT_DIR
- "{{target_output_name}}", // SUBSTITUTION_TARGET_OUTPUT_NAME
-
- "{{asmflags}}", // SUBSTITUTION_ASMFLAGS
- "{{cflags}}", // SUBSTITUTION_CFLAGS
- "{{cflags_c}}", // SUBSTITUTION_CFLAGS_C
- "{{cflags_cc}}", // SUBSTITUTION_CFLAGS_CC
- "{{cflags_objc}}", // SUBSTITUTION_CFLAGS_OBJC
- "{{cflags_objcc}}", // SUBSTITUTION_CFLAGS_OBJCC
- "{{defines}}", // SUBSTITUTION_DEFINES
- "{{include_dirs}}", // SUBSTITUTION_INCLUDE_DIRS
-
- "{{inputs}}", // SUBSTITUTION_LINKER_INPUTS
- "{{inputs_newline}}", // SUBSTITUTION_LINKER_INPUTS_NEWLINE
- "{{ldflags}}", // SUBSTITUTION_LDFLAGS
- "{{libs}}", // SUBSTITUTION_LIBS
- "{{output_dir}}", // SUBSTITUTION_OUTPUT_DIR
- "{{output_extension}}", // SUBSTITUTION_OUTPUT_EXTENSION
- "{{solibs}}", // SUBSTITUTION_SOLIBS
-
- "{{arflags}}", // SUBSTITUTION_ARFLAGS
-
- "{{bundle_root_dir}}", // SUBSTITUTION_BUNDLE_ROOT_DIR
- "{{bundle_resources_dir}}", // SUBSTITUTION_BUNDLE_RESOURCES_DIR
- "{{bundle_executable_dir}}", // SUBSTITUTION_BUNDLE_EXECUTABLE_DIR
- "{{bundle_plugins_dir}}", // SUBSTITUTION_BUNDLE_PLUGINS_DIR
- "{{bundle_product_type}}", // SUBSTITUTION_BUNDLE_PRODUCT_TYPE
-
- "{{response_file_name}}", // SUBSTITUTION_RSP_FILE_NAME
+ "<<literal>>", // SUBSTITUTION_LITERAL
+
+ "{{source}}", // SUBSTITUTION_SOURCE
+ "{{output}}", // SUBSTITUTION_OUTPUT
+
+ "{{source_name_part}}", // SUBSTITUTION_NAME_PART
+ "{{source_file_part}}", // SUBSTITUTION_FILE_PART
+ "{{source_dir}}", // SUBSTITUTION_SOURCE_DIR
+ "{{source_root_relative_dir}}", // SUBSTITUTION_SOURCE_ROOT_RELATIVE_DIR
+ "{{source_gen_dir}}", // SUBSTITUTION_SOURCE_GEN_DIR
+ "{{source_out_dir}}", // SUBSTITUTION_SOURCE_OUT_DIR
+ "{{source_target_relative}}", // SUBSTITUTION_SOURCE_TARGET_RELATIVE
+
+ "{{label}}", // SUBSTITUTION_LABEL
+ "{{label_name}}", // SUBSTITUTION_LABEL_NAME
+ "{{root_gen_dir}}", // SUBSTITUTION_ROOT_GEN_DIR
+ "{{root_out_dir}}", // SUBSTITUTION_ROOT_OUT_DIR
+ "{{target_gen_dir}}", // SUBSTITUTION_TARGET_GEN_DIR
+ "{{target_out_dir}}", // SUBSTITUTION_TARGET_OUT_DIR
+ "{{target_output_name}}", // SUBSTITUTION_TARGET_OUTPUT_NAME
+
+ "{{asmflags}}", // SUBSTITUTION_ASMFLAGS
+ "{{cflags}}", // SUBSTITUTION_CFLAGS
+ "{{cflags_c}}", // SUBSTITUTION_CFLAGS_C
+ "{{cflags_cc}}", // SUBSTITUTION_CFLAGS_CC
+ "{{cflags_objc}}", // SUBSTITUTION_CFLAGS_OBJC
+ "{{cflags_objcc}}", // SUBSTITUTION_CFLAGS_OBJCC
+ "{{defines}}", // SUBSTITUTION_DEFINES
+ "{{include_dirs}}", // SUBSTITUTION_INCLUDE_DIRS
+
+ "{{inputs}}", // SUBSTITUTION_LINKER_INPUTS
+ "{{inputs_newline}}", // SUBSTITUTION_LINKER_INPUTS_NEWLINE
+ "{{ldflags}}", // SUBSTITUTION_LDFLAGS
+ "{{libs}}", // SUBSTITUTION_LIBS
+ "{{output_dir}}", // SUBSTITUTION_OUTPUT_DIR
+ "{{output_extension}}", // SUBSTITUTION_OUTPUT_EXTENSION
+ "{{solibs}}", // SUBSTITUTION_SOLIBS
+
+ "{{arflags}}", // SUBSTITUTION_ARFLAGS
+
+ "{{bundle_root_dir}}", // SUBSTITUTION_BUNDLE_ROOT_DIR
+ "{{bundle_contents_dir}}", // SUBSTITUTION_BUNDLE_CONTENTS_DIR
+ "{{bundle_resources_dir}}", // SUBSTITUTION_BUNDLE_RESOURCES_DIR
+ "{{bundle_executable_dir}}", // SUBSTITUTION_BUNDLE_EXECUTABLE_DIR
+ "{{bundle_plugins_dir}}", // SUBSTITUTION_BUNDLE_PLUGINS_DIR
+ "{{bundle_product_type}}", // SUBSTITUTION_BUNDLE_PRODUCT_TYPE
+ "{{bundle_partial_info_plist}}", // SUBSTITUTION_BUNDLE_PARTIAL_INFO_PLIST,
+
+ "{{response_file_name}}", // SUBSTITUTION_RSP_FILE_NAME
};
const char* kSubstitutionNinjaNames[SUBSTITUTION_NUM_TYPES] = {
@@ -101,13 +103,15 @@ const char* kSubstitutionNinjaNames[SUBSTITUTION_NUM_TYPES] = {
"output_extension", // SUBSTITUTION_OUTPUT_EXTENSION
"solibs", // SUBSTITUTION_SOLIBS
- "arflags", // SUBSTITUTION_ARFLAGS
+ "arflags", // SUBSTITUTION_ARFLAGS
"bundle_root_dir", // SUBSTITUTION_BUNDLE_ROOT_DIR
+ "bundle_contents_dir", // SUBSTITUTION_BUNDLE_CONTENTS_DIR
"bundle_resources_dir", // SUBSTITUTION_BUNDLE_RESOURCES_DIR
"bundle_executable_dir", // SUBSTITUTION_BUNDLE_EXECUTABLE_DIR
"bundle_plugins_dir", // SUBSTITUTION_BUNDLE_PLUGINS_DIR
"product_type", // SUBSTITUTION_BUNDLE_PRODUCT_TYPE
+ "partial_info_plist", // SUBSTITUTION_BUNDLE_PARTIAL_INFO_PLIST
"rspfile", // SUBSTITUTION_RSP_FILE_NAME
};
@@ -138,6 +142,7 @@ bool SubstitutionIsInOutputDir(SubstitutionType type) {
bool SubstitutionIsInBundleDir(SubstitutionType type) {
return type == SUBSTITUTION_BUNDLE_ROOT_DIR ||
+ type == SUBSTITUTION_BUNDLE_CONTENTS_DIR ||
type == SUBSTITUTION_BUNDLE_RESOURCES_DIR ||
type == SUBSTITUTION_BUNDLE_EXECUTABLE_DIR ||
type == SUBSTITUTION_BUNDLE_PLUGINS_DIR;
@@ -149,6 +154,7 @@ bool IsValidBundleDataSubstitution(SubstitutionType type) {
type == SUBSTITUTION_SOURCE_FILE_PART ||
type == SUBSTITUTION_SOURCE_ROOT_RELATIVE_DIR ||
type == SUBSTITUTION_BUNDLE_ROOT_DIR ||
+ type == SUBSTITUTION_BUNDLE_CONTENTS_DIR ||
type == SUBSTITUTION_BUNDLE_RESOURCES_DIR ||
type == SUBSTITUTION_BUNDLE_EXECUTABLE_DIR ||
type == SUBSTITUTION_BUNDLE_PLUGINS_DIR;
@@ -236,9 +242,9 @@ bool IsValidCopySubstitution(SubstitutionType type) {
}
bool IsValidCompileXCassetsSubstitution(SubstitutionType type) {
- return IsValidToolSubstitution(type) ||
- type == SUBSTITUTION_LINKER_INPUTS ||
- type == SUBSTITUTION_BUNDLE_PRODUCT_TYPE;
+ return IsValidToolSubstitution(type) || type == SUBSTITUTION_LINKER_INPUTS ||
+ type == SUBSTITUTION_BUNDLE_PRODUCT_TYPE ||
+ type == SUBSTITUTION_BUNDLE_PARTIAL_INFO_PLIST;
}
bool EnsureValidSubstitutions(const std::vector<SubstitutionType>& types,
diff --git a/chromium/tools/gn/substitution_type.h b/chromium/tools/gn/substitution_type.h
index df7f8b7abcf..bb3c803d4fa 100644
--- a/chromium/tools/gn/substitution_type.h
+++ b/chromium/tools/gn/substitution_type.h
@@ -21,57 +21,59 @@ enum SubstitutionType {
// These map to Ninja's {in} and {out} variables.
SUBSTITUTION_SOURCE = SUBSTITUTION_FIRST_PATTERN, // {{source}}
- SUBSTITUTION_OUTPUT, // {{output}}
+ SUBSTITUTION_OUTPUT, // {{output}}
// Valid for all compiler tools.
- SUBSTITUTION_SOURCE_NAME_PART, // {{source_name_part}}
- SUBSTITUTION_SOURCE_FILE_PART, // {{source_file_part}}
- SUBSTITUTION_SOURCE_DIR, // {{source_dir}}
+ SUBSTITUTION_SOURCE_NAME_PART, // {{source_name_part}}
+ SUBSTITUTION_SOURCE_FILE_PART, // {{source_file_part}}
+ SUBSTITUTION_SOURCE_DIR, // {{source_dir}}
SUBSTITUTION_SOURCE_ROOT_RELATIVE_DIR, // {{root_relative_dir}}
- SUBSTITUTION_SOURCE_GEN_DIR, // {{source_gen_dir}}
- SUBSTITUTION_SOURCE_OUT_DIR, // {{source_out_dir}}
- SUBSTITUTION_SOURCE_TARGET_RELATIVE, // {{source_target_relative}}
+ SUBSTITUTION_SOURCE_GEN_DIR, // {{source_gen_dir}}
+ SUBSTITUTION_SOURCE_OUT_DIR, // {{source_out_dir}}
+ SUBSTITUTION_SOURCE_TARGET_RELATIVE, // {{source_target_relative}}
// Valid for all compiler and linker tools. These depend on the target and
// do not vary on a per-file basis.
- SUBSTITUTION_LABEL, // {{label}}
- SUBSTITUTION_LABEL_NAME, // {{label_name}}
- SUBSTITUTION_ROOT_GEN_DIR, // {{root_gen_dir}}
- SUBSTITUTION_ROOT_OUT_DIR, // {{root_out_dir}}
- SUBSTITUTION_TARGET_GEN_DIR, // {{target_gen_dir}}
- SUBSTITUTION_TARGET_OUT_DIR, // {{target_out_dir}}
+ SUBSTITUTION_LABEL, // {{label}}
+ SUBSTITUTION_LABEL_NAME, // {{label_name}}
+ SUBSTITUTION_ROOT_GEN_DIR, // {{root_gen_dir}}
+ SUBSTITUTION_ROOT_OUT_DIR, // {{root_out_dir}}
+ SUBSTITUTION_TARGET_GEN_DIR, // {{target_gen_dir}}
+ SUBSTITUTION_TARGET_OUT_DIR, // {{target_out_dir}}
SUBSTITUTION_TARGET_OUTPUT_NAME, // {{target_output_name}}
// Valid for compiler tools.
- SUBSTITUTION_ASMFLAGS, // {{asmflags}}
- SUBSTITUTION_CFLAGS, // {{cflags}}
- SUBSTITUTION_CFLAGS_C, // {{cflags_c}}
- SUBSTITUTION_CFLAGS_CC, // {{cflags_cc}}
- SUBSTITUTION_CFLAGS_OBJC, // {{cflags_objc}}
+ SUBSTITUTION_ASMFLAGS, // {{asmflags}}
+ SUBSTITUTION_CFLAGS, // {{cflags}}
+ SUBSTITUTION_CFLAGS_C, // {{cflags_c}}
+ SUBSTITUTION_CFLAGS_CC, // {{cflags_cc}}
+ SUBSTITUTION_CFLAGS_OBJC, // {{cflags_objc}}
SUBSTITUTION_CFLAGS_OBJCC, // {{cflags_objcc}}
- SUBSTITUTION_DEFINES, // {{defines}}
+ SUBSTITUTION_DEFINES, // {{defines}}
SUBSTITUTION_INCLUDE_DIRS, // {{include_dirs}}
// Valid for linker tools.
- SUBSTITUTION_LINKER_INPUTS, // {{inputs}}
+ SUBSTITUTION_LINKER_INPUTS, // {{inputs}}
SUBSTITUTION_LINKER_INPUTS_NEWLINE, // {{inputs_newline}}
- SUBSTITUTION_LDFLAGS, // {{ldflags}}
- SUBSTITUTION_LIBS, // {{libs}}
- SUBSTITUTION_OUTPUT_DIR, // {{output_dir}}
- SUBSTITUTION_OUTPUT_EXTENSION, // {{output_extension}}
- SUBSTITUTION_SOLIBS, // {{solibs}}
+ SUBSTITUTION_LDFLAGS, // {{ldflags}}
+ SUBSTITUTION_LIBS, // {{libs}}
+ SUBSTITUTION_OUTPUT_DIR, // {{output_dir}}
+ SUBSTITUTION_OUTPUT_EXTENSION, // {{output_extension}}
+ SUBSTITUTION_SOLIBS, // {{solibs}}
// Valid for alink only.
SUBSTITUTION_ARFLAGS, // {{arflags}}
// Valid for bundle_data targets.
- SUBSTITUTION_BUNDLE_ROOT_DIR, // {{bundle_root_dir}}
- SUBSTITUTION_BUNDLE_RESOURCES_DIR, // {{bundle_resources_dir}}
+ SUBSTITUTION_BUNDLE_ROOT_DIR, // {{bundle_root_dir}}
+ SUBSTITUTION_BUNDLE_CONTENTS_DIR, // {{bundle_contents_dir}}
+ SUBSTITUTION_BUNDLE_RESOURCES_DIR, // {{bundle_resources_dir}}
SUBSTITUTION_BUNDLE_EXECUTABLE_DIR, // {{bundle_executable_dir}}
- SUBSTITUTION_BUNDLE_PLUGINS_DIR, // {{bundle_plugins_dir}}
+ SUBSTITUTION_BUNDLE_PLUGINS_DIR, // {{bundle_plugins_dir}}
// Valid for compile_xcassets tool.
- SUBSTITUTION_BUNDLE_PRODUCT_TYPE, // {{bundle_product_type}}
+ SUBSTITUTION_BUNDLE_PRODUCT_TYPE, // {{bundle_product_type}}
+ SUBSTITUTION_BUNDLE_PARTIAL_INFO_PLIST, // {{bundle_partial_info_plist}}
// Used only for the args of actions.
SUBSTITUTION_RSP_FILE_NAME, // {{response_file_name}}
diff --git a/chromium/tools/gn/variables.cc b/chromium/tools/gn/variables.cc
index 5b8018135f9..c8c75b82f76 100644
--- a/chromium/tools/gn/variables.cc
+++ b/chromium/tools/gn/variables.cc
@@ -586,18 +586,36 @@ Example
bundle_data("info_plist") {
sources = [ "Info.plist" ]
- outputs = [ "{{bundle_root_dir}}/Info.plist" ]
+ outputs = [ "{{bundle_contents_dir}}/Info.plist" ]
}
create_bundle("doom_melon.app") {
deps = [ ":info_plist" ]
- bundle_root_dir = root_build_dir + "/doom_melon.app/Contents"
- bundle_resources_dir = bundle_root_dir + "/Resources"
- bundle_executable_dir = bundle_root_dir + "/MacOS"
- bundle_plugins_dir = bundle_root_dir + "/PlugIns"
+ bundle_root_dir = "${root_build_dir}/doom_melon.app"
+ bundle_contents_dir = "${bundle_root_dir}/Contents"
+ bundle_resources_dir = "${bundle_contents_dir}/Resources"
+ bundle_executable_dir = "${bundle_contents_dir}/MacOS"
+ bundle_plugins_dir = "${bundle_contents_dir}/PlugIns"
}
)";
+const char kBundleContentsDir[] = "bundle_contents_dir";
+const char kBundleContentsDir_HelpShort[] =
+ "bundle_contents_dir: "
+ "Expansion of {{bundle_contents_dir}} in create_bundle.";
+const char kBundleContentsDir_Help[] =
+ R"(bundle_contents_dir: Expansion of {{bundle_contents_dir}} in
+ create_bundle.
+
+ A string corresponding to a path in $root_build_dir.
+
+ This string is used by the "create_bundle" target to expand the
+ {{bundle_contents_dir}} of the "bundle_data" target it depends on. This must
+ correspond to a path under "bundle_root_dir".
+
+ See "gn help bundle_root_dir" for examples.
+)";
+
const char kBundleResourcesDir[] = "bundle_resources_dir";
const char kBundleResourcesDir_HelpShort[] =
"bundle_resources_dir: "
@@ -954,7 +972,7 @@ const char kData_Help[] =
However, no verification is done on these so GN doesn't enforce this. The
paths are just rebased and passed along when requested.
- Note: On iOS and OS X, create_bundle targets will not be recursed into when
+ Note: On iOS and macOS, create_bundle targets will not be recursed into when
gathering data. See "gn help create_bundle" for details.
See "gn help runtime_deps" for how these are used.
@@ -975,7 +993,7 @@ const char kDataDeps_Help[] =
This is normally used for things like plugins or helper programs that a
target needs at runtime.
- Note: On iOS and OS X, create_bundle targets will not be recursed into when
+ Note: On iOS and macOS, create_bundle targets will not be recursed into when
gathering data_deps. See "gn help create_bundle" for details.
See also "gn help deps" and "gn help data".
@@ -1404,6 +1422,20 @@ Example
}
)";
+const char kPartialInfoPlist[] = "partial_info_plist";
+const char kPartialInfoPlist_HelpShort[] =
+ "partial_info_plist: [filename] Path plist from asset catalog compiler.";
+const char kPartialInfoPlist_Help[] =
+ R"(partial_info_plist: [filename] Path plist from asset catalog compiler.
+
+ Valid for create_bundle target, corresponds to the path for the partial
+ Info.plist created by the asset catalog compiler that needs to be merged
+ with the application Info.plist (usually done by the code signing script).
+
+ The file will be generated regardless of whether the asset compiler has
+ been invoked or not. See "gn help create_bundle".
+)";
+
const char kOutputs[] = "outputs";
const char kOutputs_HelpShort[] =
"outputs: [file list] Output files for actions and copy targets.";
@@ -1917,6 +1949,7 @@ const VariableInfoMap& GetTargetVariables() {
INSERT_VARIABLE(Asmflags)
INSERT_VARIABLE(AssertNoDeps)
INSERT_VARIABLE(BundleRootDir)
+ INSERT_VARIABLE(BundleContentsDir)
INSERT_VARIABLE(BundleResourcesDir)
INSERT_VARIABLE(BundleDepsFilter)
INSERT_VARIABLE(BundleExecutableDir)
@@ -1949,6 +1982,7 @@ const VariableInfoMap& GetTargetVariables() {
INSERT_VARIABLE(OutputName)
INSERT_VARIABLE(OutputPrefixOverride)
INSERT_VARIABLE(Outputs)
+ INSERT_VARIABLE(PartialInfoPlist)
INSERT_VARIABLE(Pool)
INSERT_VARIABLE(PrecompiledHeader)
INSERT_VARIABLE(PrecompiledHeaderType)
diff --git a/chromium/tools/gn/variables.h b/chromium/tools/gn/variables.h
index 0ea00d54909..0e3891d53a9 100644
--- a/chromium/tools/gn/variables.h
+++ b/chromium/tools/gn/variables.h
@@ -107,6 +107,10 @@ extern const char kBundleRootDir[];
extern const char kBundleRootDir_HelpShort[];
extern const char kBundleRootDir_Help[];
+extern const char kBundleContentsDir[];
+extern const char kBundleContentsDir_HelpShort[];
+extern const char kBundleContentsDir_Help[];
+
extern const char kBundleResourcesDir[];
extern const char kBundleResourcesDir_HelpShort[];
extern const char kBundleResourcesDir_Help[];
@@ -235,6 +239,10 @@ extern const char kOutputs[];
extern const char kOutputs_HelpShort[];
extern const char kOutputs_Help[];
+extern const char kPartialInfoPlist[];
+extern const char kPartialInfoPlist_HelpShort[];
+extern const char kPartialInfoPlist_Help[];
+
extern const char kPool[];
extern const char kPool_HelpShort[];
extern const char kPool_Help[];
diff --git a/chromium/tools/gn/visual_studio_writer.cc b/chromium/tools/gn/visual_studio_writer.cc
index 9f72a6f14a0..ad821a76959 100644
--- a/chromium/tools/gn/visual_studio_writer.cc
+++ b/chromium/tools/gn/visual_studio_writer.cc
@@ -8,10 +8,10 @@
#include <iterator>
#include <map>
#include <memory>
-#include <queue>
#include <set>
#include <string>
+#include "base/containers/queue.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
@@ -79,7 +79,7 @@ const char kVersionStringVs2013[] = "Visual Studio 2013"; // Visual Studio 2013
const char kVersionStringVs2015[] = "Visual Studio 2015"; // Visual Studio 2015
const char kVersionStringVs2017[] = "Visual Studio 2017"; // Visual Studio 2017
const char kWindowsKitsVersion[] = "10"; // Windows 10 SDK
-const char kWindowsKitsDefaultVersion[] = "10.0.14393.0"; // Windows 10 SDK
+const char kWindowsKitsDefaultVersion[] = "10.0.15063.0"; // Windows 10 SDK
const char kGuidTypeProject[] = "{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}";
const char kGuidTypeFolder[] = "{2150E333-8FDC-42A3-9474-1A3956D46DE8}";
@@ -205,7 +205,7 @@ bool FilterTargets(const BuildSettings* build_settings,
return true;
std::set<Label> labels;
- std::queue<const Target*> to_process;
+ base::queue<const Target*> to_process;
for (const Target* target : *targets) {
labels.insert(target->label());
to_process.push(target);
diff --git a/chromium/tools/gn/visual_studio_writer_unittest.cc b/chromium/tools/gn/visual_studio_writer_unittest.cc
index 1ad7c61c60d..6a7fbfc4314 100644
--- a/chromium/tools/gn/visual_studio_writer_unittest.cc
+++ b/chromium/tools/gn/visual_studio_writer_unittest.cc
@@ -29,7 +29,7 @@ std::string MakeTestPath(const std::string& path) {
TEST_F(VisualStudioWriterTest, ResolveSolutionFolders) {
VisualStudioWriter writer(setup_.build_settings(), "Win32",
VisualStudioWriter::Version::Vs2015,
- "10.0.14393.0");
+ "10.0.15063.0");
std::string path =
MakeTestPath("/foo/chromium/src/out/Debug/obj/base/base.vcxproj");
@@ -84,7 +84,7 @@ TEST_F(VisualStudioWriterTest, ResolveSolutionFolders) {
TEST_F(VisualStudioWriterTest, ResolveSolutionFolders_AbsPath) {
VisualStudioWriter writer(setup_.build_settings(), "Win32",
VisualStudioWriter::Version::Vs2015,
- "10.0.14393.0");
+ "10.0.15063.0");
std::string path =
MakeTestPath("/foo/chromium/src/out/Debug/obj/base/base.vcxproj");
diff --git a/chromium/tools/gn/xcode_object.cc b/chromium/tools/gn/xcode_object.cc
index 8a3147037ed..9d8e791c599 100644
--- a/chromium/tools/gn/xcode_object.cc
+++ b/chromium/tools/gn/xcode_object.cc
@@ -13,6 +13,8 @@
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "tools/gn/filesystem_utils.h"
+#include "tools/gn/source_file.h"
+#include "tools/gn/source_file_type.h"
// Helper methods -------------------------------------------------------------
@@ -153,9 +155,10 @@ bool HasExplicitFileType(const base::StringPiece& ext) {
return ext == "dart";
}
-bool IsSourceFileForIndexing(const base::StringPiece& ext) {
- return ext == "c" || ext == "cc" || ext == "cpp" || ext == "cxx" ||
- ext == "m" || ext == "mm";
+bool IsSourceFileForIndexing(const SourceFile& src) {
+ const SourceFileType type = GetSourceFileType(src);
+ return type == SOURCE_C || type == SOURCE_CPP || type == SOURCE_M ||
+ type == SOURCE_MM;
}
void PrintValue(std::ostream& out, IndentRules rules, unsigned value) {
@@ -692,8 +695,7 @@ void PBXProject::AddSourceFile(const std::string& navigator_path,
PBXNativeTarget* target) {
PBXFileReference* file_reference =
sources_->AddSourceFile(navigator_path, source_path);
- base::StringPiece ext = FindExtension(&source_path);
- if (!IsSourceFileForIndexing(ext))
+ if (!IsSourceFileForIndexing(SourceFile(source_path)))
return;
DCHECK(target);
diff --git a/chromium/tools/gn/xcode_object_unittest.cc b/chromium/tools/gn/xcode_object_unittest.cc
index 4e04946ee9f..52a98b9b45f 100644
--- a/chromium/tools/gn/xcode_object_unittest.cc
+++ b/chromium/tools/gn/xcode_object_unittest.cc
@@ -323,3 +323,113 @@ TEST(XcodeObject, ClassToString) {
EXPECT_EQ("PBXShellScriptBuildPhase",
ToString(PBXShellScriptBuildPhaseClass));
}
+
+// Tests the mapping between PBXObject and it's name as a string.
+TEST(XcodeObject, PBXSourcesBuildPhaseName) {
+ std::unique_ptr<PBXSourcesBuildPhase> pbx_sources_build_phase =
+ GetPBXSourcesBuildPhaseObject();
+ EXPECT_EQ("Sources", pbx_sources_build_phase->Name());
+}
+
+TEST(XcodeObject, PBXFrameworksBuildPhaseName) {
+ std::unique_ptr<PBXFrameworksBuildPhase> pbx_frameworks_build_phase =
+ GetPBXFrameworksBuildPhaseObject();
+ EXPECT_EQ("Frameworks", pbx_frameworks_build_phase->Name());
+}
+
+TEST(XcodeObject, PBXShellScriptBuildPhaseName) {
+ std::unique_ptr<PBXShellScriptBuildPhase> pbx_shell_script_build_phase =
+ GetPBXShellScriptBuildPhaseObject();
+ EXPECT_EQ("Action \"Compile and copy name via ninja\"",
+ pbx_shell_script_build_phase->Name());
+}
+
+TEST(XcodeObject, PBXGroupName) {
+ PBXGroup pbx_group_with_name(std::string(), "name");
+ EXPECT_EQ("name", pbx_group_with_name.Name());
+
+ PBXGroup pbx_group_with_path("path", std::string());
+ EXPECT_EQ("path", pbx_group_with_path.Name());
+
+ PBXGroup pbx_group_empty{std::string(), std::string()};
+ EXPECT_EQ(std::string(), pbx_group_empty.Name());
+}
+
+TEST(XcodeObject, PBXProjectName) {
+ std::unique_ptr<PBXProject> pbx_project = GetPBXProjectObject();
+ EXPECT_EQ("project", pbx_project->Name());
+}
+
+TEST(XcodeObject, PBXFileReferenceName) {
+ std::unique_ptr<PBXFileReference> pbx_file_reference =
+ GetPBXFileReferenceObject();
+ EXPECT_EQ("product.app", pbx_file_reference->Name());
+}
+
+TEST(XcodeObject, PBXBuildFileName) {
+ std::unique_ptr<PBXFileReference> pbx_file_reference =
+ GetPBXFileReferenceObject();
+ std::unique_ptr<PBXSourcesBuildPhase> pbx_sources_build_phase =
+ GetPBXSourcesBuildPhaseObject();
+ std::unique_ptr<PBXBuildFile> pbx_build_file = GetPBXBuildFileObject(
+ pbx_file_reference.get(), pbx_sources_build_phase.get());
+ EXPECT_EQ("product.app in Sources", pbx_build_file->Name());
+}
+
+TEST(XcodeObject, PBXAggregateTargetName) {
+ std::unique_ptr<PBXAggregateTarget> pbx_aggregate_target =
+ GetPBXAggregateTargetObject();
+ EXPECT_EQ("target_name", pbx_aggregate_target->Name());
+}
+
+TEST(XcodeObject, PBXNativeTargetName) {
+ std::unique_ptr<PBXFileReference> product_reference =
+ GetPBXFileReferenceObject();
+ std::unique_ptr<PBXNativeTarget> pbx_native_target =
+ GetPBXNativeTargetObject(product_reference.get());
+ EXPECT_EQ("target_name", pbx_native_target->Name());
+}
+
+TEST(XcodeObject, PBXContainerItemProxyName) {
+ std::unique_ptr<PBXFileReference> product_reference =
+ GetPBXFileReferenceObject();
+ std::unique_ptr<PBXNativeTarget> pbx_native_target =
+ GetPBXNativeTargetObject(product_reference.get());
+ std::unique_ptr<PBXProject> pbx_project = GetPBXProjectObject();
+ std::unique_ptr<PBXContainerItemProxy> pbx_container_item_proxy =
+ GetPBXContainerItemProxyObject(pbx_project.get(),
+ pbx_native_target.get());
+ EXPECT_EQ("PBXContainerItemProxy", pbx_container_item_proxy->Name());
+}
+
+TEST(XcodeObject, PBXTargetDependencyName) {
+ std::unique_ptr<PBXFileReference> product_reference =
+ GetPBXFileReferenceObject();
+ std::unique_ptr<PBXProject> pbx_project = GetPBXProjectObject();
+ std::unique_ptr<PBXNativeTarget> pbx_native_target =
+ GetPBXNativeTargetObject(product_reference.get());
+ std::unique_ptr<PBXContainerItemProxy> pbx_container_item_proxy =
+ GetPBXContainerItemProxyObject(pbx_project.get(),
+ pbx_native_target.get());
+ std::unique_ptr<PBXTargetDependency> pbx_target_dependency =
+ GetPBXTargetDependencyObject(pbx_native_target.get(),
+ std::move(pbx_container_item_proxy));
+ EXPECT_EQ("PBXTargetDependency", pbx_target_dependency->Name());
+}
+
+TEST(XcodeObject, XCBuildConfigurationName) {
+ std::unique_ptr<XCBuildConfiguration> xc_build_configuration =
+ GetXCBuildConfigurationObject();
+ EXPECT_EQ("config_name", xc_build_configuration->Name());
+}
+
+TEST(XcodeObject, XCConfigurationListName) {
+ std::unique_ptr<PBXFileReference> product_reference =
+ GetPBXFileReferenceObject();
+ std::unique_ptr<PBXNativeTarget> pbx_native_target =
+ GetPBXNativeTargetObject(product_reference.get());
+ std::unique_ptr<XCConfigurationList> xc_configuration_list =
+ GetXCConfigurationListObject(pbx_native_target.get());
+ EXPECT_EQ("Build configuration list for PBXNativeTarget \"target_name\"",
+ xc_configuration_list->Name());
+}
diff --git a/chromium/tools/grit/grit/format/data_pack.py b/chromium/tools/grit/grit/format/data_pack.py
index 15d977edca5..f1b1660e874 100755
--- a/chromium/tools/grit/grit/format/data_pack.py
+++ b/chromium/tools/grit/grit/format/data_pack.py
@@ -40,6 +40,7 @@ DataPackContents = collections.namedtuple(
def Format(root, lang='en', output_dir='.'):
"""Writes out the data pack file format (platform agnostic resource file)."""
data = {}
+ root.info = []
for node in root.ActiveDescendants():
with node:
if isinstance(node, (include.IncludeNode, message.MessageNode,
@@ -47,6 +48,8 @@ def Format(root, lang='en', output_dir='.'):
id, value = node.GetDataPackPair(lang, UTF8)
if value is not None:
data[id] = value
+ root.info.append(
+ '{},{},{}'.format(node.attrs.get('name'), id, node.source))
return WriteDataPackToString(data, UTF8)
@@ -173,6 +176,7 @@ def RePack(output_file, input_files, whitelist_file=None,
inconsistent.
"""
input_data_packs = [ReadDataPack(filename) for filename in input_files]
+ input_info_files = [filename + '.info' for filename in input_files]
whitelist = None
if whitelist_file:
whitelist = util.ReadFile(whitelist_file, util.RAW_TEXT).strip().split('\n')
@@ -180,6 +184,10 @@ def RePack(output_file, input_files, whitelist_file=None,
resources, encoding = RePackFromDataPackStrings(
input_data_packs, whitelist, suppress_removed_key_output)
WriteDataPack(resources, output_file, encoding)
+ with open(output_file + '.info', 'w') as output_info_file:
+ for filename in input_info_files:
+ with open(filename, 'r') as info_file:
+ output_info_file.writelines(info_file.readlines())
def RePackFromDataPackStrings(inputs, whitelist,
diff --git a/chromium/tools/grit/grit/format/policy_templates/PRESUBMIT.py b/chromium/tools/grit/grit/format/policy_templates/PRESUBMIT.py
deleted file mode 100755
index dca24a0c484..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/PRESUBMIT.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-UNIT_TESTS = [
- 'policy_template_generator_unittest',
- 'writers.adm_writer_unittest',
- 'writers.adml_writer_unittest',
- 'writers.admx_writer_unittest',
- 'writers.android_policy_writer_unittest',
- 'writers.doc_writer_unittest',
- 'writers.json_writer_unittest',
- 'writers.plist_strings_writer_unittest',
- 'writers.plist_writer_unittest',
- 'writers.reg_writer_unittest',
- 'writers.template_writer_unittest'
-]
-
-def CheckChangeOnUpload(input_api, output_api):
- return input_api.canned_checks.RunPythonUnitTests(input_api,
- output_api,
- UNIT_TESTS)
-
-
-def CheckChangeOnCommit(input_api, output_api):
- return input_api.canned_checks.RunPythonUnitTests(input_api,
- output_api,
- UNIT_TESTS)
diff --git a/chromium/tools/grit/grit/format/policy_templates/__init__.py b/chromium/tools/grit/grit/format/policy_templates/__init__.py
deleted file mode 100755
index 21cab6548be..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Module grit.format.policy_templates
-'''
-
-pass
-
diff --git a/chromium/tools/grit/grit/format/policy_templates/policy_template_generator.py b/chromium/tools/grit/grit/format/policy_templates/policy_template_generator.py
deleted file mode 100755
index 1444a8dcde6..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/policy_template_generator.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import copy
-
-
-class PolicyTemplateGenerator:
- '''Generates template text for a particular platform.
-
- This class is used to traverse a JSON structure from a .json template
- definition metafile and merge GUI message string definitions that come
- from a .grd resource tree onto it. After this, it can be used to output
- this data to policy template files using TemplateWriter objects.
- '''
-
- def _ImportMessage(self, msg_txt):
- msg_txt = msg_txt.decode('utf-8')
- # Replace the placeholder of app name.
- msg_txt = msg_txt.replace('$1', self._config['app_name'])
- msg_txt = msg_txt.replace('$2', self._config['os_name'])
- msg_txt = msg_txt.replace('$3', self._config['frame_name'])
- # Strip spaces and escape newlines.
- lines = msg_txt.split('\n')
- lines = [line.strip() for line in lines]
- return "\n".join(lines)
-
- def __init__(self, config, policy_data):
- '''Initializes this object with all the data necessary to output a
- policy template.
-
- Args:
- messages: An identifier to string dictionary of all the localized
- messages that might appear in the policy template.
- policy_definitions: The list of defined policies and groups, as
- parsed from the policy metafile. Note that this list is passed by
- reference and its contents are modified.
- See chrome/app/policy.policy_templates.json for description and
- content.
- '''
- # List of all the policies:
- self._policy_data = copy.deepcopy(policy_data)
- # Localized messages to be inserted to the policy_definitions structure:
- self._messages = self._policy_data['messages']
- self._config = config
- for key in self._messages.keys():
- self._messages[key]['text'] = self._ImportMessage(
- self._messages[key]['text'])
- self._policy_definitions = self._policy_data['policy_definitions']
- self._ProcessPolicyList(self._policy_definitions)
-
- def _ProcessSupportedOn(self, supported_on):
- '''Parses and converts the string items of the list of supported platforms
- into dictionaries.
-
- Args:
- supported_on: The list of supported platforms. E.g.:
- ['chrome.win:8-10', 'chrome_frame:10-']
-
- Returns:
- supported_on: The list with its items converted to dictionaries. E.g.:
- [{
- 'product': 'chrome',
- 'platforms': 'win',
- 'since_version': '8',
- 'until_version': '10'
- }, {
- 'product': 'chrome_frame',
- 'platforms': 'win',
- 'since_version': '10',
- 'until_version': ''
- }]
- '''
- result = []
- for supported_on_item in supported_on:
- product_platform_part, version_part = supported_on_item.split(':')
-
- if '.' in product_platform_part:
- product, platform = product_platform_part.split('.')
- if platform == '*':
- # e.g.: 'chrome.*:8-10'
- platforms = ['linux', 'mac', 'win']
- else:
- # e.g.: 'chrome.win:-10'
- platforms = [platform]
- else:
- # e.g.: 'chrome_frame:7-'
- product, platform = {
- 'android': ('chrome', 'android'),
- 'webview_android': ('webview', 'android'),
- 'chrome_os': ('chrome_os', 'chrome_os'),
- 'chrome_frame': ('chrome_frame', 'win'),
- }[product_platform_part]
- platforms = [platform]
- since_version, until_version = version_part.split('-')
- result.append({
- 'product': product,
- 'platforms': platforms,
- 'since_version': since_version,
- 'until_version': until_version
- })
- return result
-
- def _ProcessPolicy(self, policy):
- '''Processes localized message strings in a policy or a group.
- Also breaks up the content of 'supported_on' attribute into a list.
-
- Args:
- policy: The data structure of the policy or group, that will get message
- strings here.
- '''
- policy['desc'] = self._ImportMessage(policy['desc'])
- policy['caption'] = self._ImportMessage(policy['caption'])
- if 'label' in policy:
- policy['label'] = self._ImportMessage(policy['label'])
- if 'arc_support' in policy:
- policy['arc_support'] = self._ImportMessage(policy['arc_support'])
-
-
- if policy['type'] == 'group':
- self._ProcessPolicyList(policy['policies'])
- elif policy['type'] in ('string-enum', 'int-enum', 'string-enum-list'):
- # Iterate through all the items of an enum-type policy, and add captions.
- for item in policy['items']:
- item['caption'] = self._ImportMessage(item['caption'])
- if policy['type'] != 'group':
- if not 'label' in policy:
- # If 'label' is not specified, then it defaults to 'caption':
- policy['label'] = policy['caption']
- policy['supported_on'] = self._ProcessSupportedOn(policy['supported_on'])
-
- def _ProcessPolicyList(self, policy_list):
- '''Adds localized message strings to each item in a list of policies and
- groups. Also breaks up the content of 'supported_on' attributes into lists
- of dictionaries.
-
- Args:
- policy_list: A list of policies and groups. Message strings will be added
- for each item and to their child items, recursively.
- '''
- for policy in policy_list:
- self._ProcessPolicy(policy)
-
- def GetTemplateText(self, template_writer):
- '''Generates the text of the template from the arguments given
- to the constructor, using a given TemplateWriter.
-
- Args:
- template_writer: An object implementing TemplateWriter. Its methods
- are called here for each item of self._policy_groups.
-
- Returns:
- The text of the generated template.
- '''
- return template_writer.WriteTemplate(self._policy_data)
diff --git a/chromium/tools/grit/grit/format/policy_templates/policy_template_generator_unittest.py b/chromium/tools/grit/grit/format/policy_templates/policy_template_generator_unittest.py
deleted file mode 100755
index adc4a224f35..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/policy_template_generator_unittest.py
+++ /dev/null
@@ -1,395 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
-
-import unittest
-
-from grit.format.policy_templates import policy_template_generator
-from grit.format.policy_templates.writers import mock_writer
-from grit.format.policy_templates.writers import template_writer
-
-
-class PolicyTemplateGeneratorUnittest(unittest.TestCase):
- '''Unit tests for policy_template_generator.py.'''
-
- def do_test(self, policy_data, writer):
- '''Executes a test case.
-
- Creates and invokes an instance of PolicyTemplateGenerator with
- the given arguments.
-
- Notice: Plain comments are used in test methods instead of docstrings,
- so that method names do not get overridden by the docstrings in the
- test output.
-
- Args:
- policy_data: The list of policies and groups as it would be
- loaded from policy_templates.json.
- writer: A writer used for this test. It is usually derived from
- mock_writer.MockWriter.
- '''
- writer.tester = self
- config = {
- 'app_name': '_app_name',
- 'frame_name': '_frame_name',
- 'os_name': '_os_name',
- }
- if not 'messages' in policy_data:
- policy_data['messages'] = {}
- if not 'placeholders' in policy_data:
- policy_data['placeholders'] = []
- if not 'policy_definitions' in policy_data:
- policy_data['policy_definitions'] = []
- policy_generator = policy_template_generator.PolicyTemplateGenerator(
- config,
- policy_data)
- res = policy_generator.GetTemplateText(writer)
- writer.Test()
- return res
-
- def testSequence(self):
- # Test the sequence of invoking the basic PolicyWriter operations,
- # in case of empty input data structures.
- class LocalMockWriter(mock_writer.MockWriter):
- def __init__(self):
- self.log = 'init;'
- def Init(self):
- self.log += 'prepare;'
- def BeginTemplate(self):
- self.log += 'begin;'
- def EndTemplate(self):
- self.log += 'end;'
- def GetTemplateText(self):
- self.log += 'get_text;'
- return 'writer_result_string'
- def Test(self):
- self.tester.assertEquals(self.log,
- 'init;prepare;begin;end;get_text;')
- result = self.do_test({}, LocalMockWriter())
- self.assertEquals(result, 'writer_result_string')
-
- def testEmptyGroups(self):
- # Test that empty policy groups are not passed to the writer.
- policies_mock = {
- 'policy_definitions': [
- {'name': 'Group1', 'type': 'group', 'policies': [],
- 'desc': '', 'caption': ''},
- {'name': 'Group2', 'type': 'group', 'policies': [],
- 'desc': '', 'caption': ''},
- {'name': 'Group3', 'type': 'group', 'policies': [],
- 'desc': '', 'caption': ''},
- ]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def __init__(self):
- self.log = ''
- def BeginPolicyGroup(self, group):
- self.log += '['
- def EndPolicyGroup(self):
- self.log += ']'
- def Test(self):
- self.tester.assertEquals(self.log, '')
- self.do_test(policies_mock, LocalMockWriter())
-
- def testGroups(self):
- # Test that policy groups are passed to the writer in the correct order.
- policies_mock = {
- 'policy_definitions': [
- {
- 'name': 'Group1', 'type': 'group',
- 'caption': '', 'desc': '',
- 'policies': [{'name': 'TAG1', 'type': 'mock', 'supported_on': [],
- 'caption': '', 'desc': ''}]
- },
- {
- 'name': 'Group2', 'type': 'group',
- 'caption': '', 'desc': '',
- 'policies': [{'name': 'TAG2', 'type': 'mock', 'supported_on': [],
- 'caption': '', 'desc': ''}]
- },
- {
- 'name': 'Group3', 'type': 'group',
- 'caption': '', 'desc': '',
- 'policies': [{'name': 'TAG3', 'type': 'mock', 'supported_on': [],
- 'caption': '', 'desc': ''}]
- },
- ]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def __init__(self):
- self.log = ''
- def BeginPolicyGroup(self, group):
- self.log += '[' + group['policies'][0]['name']
- def EndPolicyGroup(self):
- self.log += ']'
- def Test(self):
- self.tester.assertEquals(self.log, '[TAG1][TAG2][TAG3]')
- self.do_test(policies_mock, LocalMockWriter())
-
- def testPolicies(self):
- # Test that policies are passed to the writer in the correct order.
- policy_defs_mock = {
- 'policy_definitions': [
- {
- 'name': 'Group1',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [
- {'name': 'Group1Policy1', 'type': 'string', 'supported_on': [],
- 'caption': '', 'desc': ''},
- {'name': 'Group1Policy2', 'type': 'string', 'supported_on': [],
- 'caption': '', 'desc': ''},
- ]
- },
- {
- 'name': 'Group2',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [
- {'name': 'Group2Policy3', 'type': 'string', 'supported_on': [],
- 'caption': '', 'desc': ''},
- ]
- }
- ]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def __init__(self):
- self.policy_name = None
- self.policy_list = []
- def BeginPolicyGroup(self, group):
- self.group = group;
- def EndPolicyGroup(self):
- self.group = None
- def WritePolicy(self, policy):
- self.tester.assertEquals(policy['name'][0:6], self.group['name'])
- self.policy_list.append(policy['name'])
- def Test(self):
- self.tester.assertEquals(
- self.policy_list,
- ['Group1Policy1', 'Group1Policy2', 'Group2Policy3'])
- self.do_test( policy_defs_mock, LocalMockWriter())
-
- def testPolicyTexts(self):
- # Test that GUI messages of policies all get placeholders replaced.
- policy_data_mock = {
- 'policy_definitions': [
- {
- 'name': 'Group1',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [
- {
- 'name': 'Policy1',
- 'caption': '1. app_name -- $1',
- 'label': '2. os_name -- $2',
- 'desc': '3. frame_name -- $3',
- 'type': 'string',
- 'supported_on': []
- },
- ]
- }
- ]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def WritePolicy(self, policy):
- if policy['name'] == 'Policy1':
- self.tester.assertEquals(policy['caption'],
- '1. app_name -- _app_name')
- self.tester.assertEquals(policy['label'],
- '2. os_name -- _os_name')
- self.tester.assertEquals(policy['desc'],
- '3. frame_name -- _frame_name')
- elif policy['name'] == 'Group1':
- pass
- else:
- self.tester.fail()
- self.do_test(policy_data_mock, LocalMockWriter())
-
- def testIntEnumTexts(self):
- # Test that GUI messages are assigned correctly to int-enums
- # (aka dropdown menus).
- policy_defs_mock = {
- 'policy_definitions': [{
- 'name': 'Policy1',
- 'type': 'int-enum',
- 'caption': '', 'desc': '',
- 'supported_on': [],
- 'items': [
- {'name': 'item1', 'value': 0, 'caption': 'string1', 'desc': ''},
- {'name': 'item2', 'value': 1, 'caption': 'string2', 'desc': ''},
- {'name': 'item3', 'value': 3, 'caption': 'string3', 'desc': ''},
- ]
- }]
- }
-
- class LocalMockWriter(mock_writer.MockWriter):
- def WritePolicy(self, policy):
- self.tester.assertEquals(policy['items'][0]['caption'], 'string1')
- self.tester.assertEquals(policy['items'][1]['caption'], 'string2')
- self.tester.assertEquals(policy['items'][2]['caption'], 'string3')
- self.do_test(policy_defs_mock, LocalMockWriter())
-
- def testStringEnumTexts(self):
- # Test that GUI messages are assigned correctly to string-enums
- # (aka dropdown menus).
- policy_data_mock = {
- 'policy_definitions': [{
- 'name': 'Policy1',
- 'type': 'string-enum',
- 'caption': '', 'desc': '',
- 'supported_on': [],
- 'items': [
- {'name': 'item1', 'value': 'one', 'caption': 'string1', 'desc': ''},
- {'name': 'item2', 'value': 'two', 'caption': 'string2', 'desc': ''},
- {'name': 'item3', 'value': 'three', 'caption': 'string3', 'desc': ''},
- ]
- }]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def WritePolicy(self, policy):
- self.tester.assertEquals(policy['items'][0]['caption'], 'string1')
- self.tester.assertEquals(policy['items'][1]['caption'], 'string2')
- self.tester.assertEquals(policy['items'][2]['caption'], 'string3')
- self.do_test(policy_data_mock, LocalMockWriter())
-
- def testStringEnumTexts(self):
- # Test that GUI messages are assigned correctly to string-enums
- # (aka dropdown menus).
- policy_data_mock = {
- 'policy_definitions': [{
- 'name': 'Policy1',
- 'type': 'string-enum-list',
- 'caption': '', 'desc': '',
- 'supported_on': [],
- 'items': [
- {'name': 'item1', 'value': 'one', 'caption': 'string1', 'desc': ''},
- {'name': 'item2', 'value': 'two', 'caption': 'string2', 'desc': ''},
- {'name': 'item3', 'value': 'three', 'caption': 'string3', 'desc': ''},
- ]
- }]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def WritePolicy(self, policy):
- self.tester.assertEquals(policy['items'][0]['caption'], 'string1')
- self.tester.assertEquals(policy['items'][1]['caption'], 'string2')
- self.tester.assertEquals(policy['items'][2]['caption'], 'string3')
- self.do_test(policy_data_mock, LocalMockWriter())
-
- def testPolicyFiltering(self):
- # Test that policies are filtered correctly based on their annotations.
- policy_data_mock = {
- 'policy_definitions': [
- {
- 'name': 'Group1',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [
- {
- 'name': 'Group1Policy1',
- 'type': 'string',
- 'caption': '',
- 'desc': '',
- 'supported_on': [
- 'chrome.aaa:8-', 'chrome.bbb:8-', 'chrome.ccc:8-'
- ]
- },
- {
- 'name': 'Group1Policy2',
- 'type': 'string',
- 'caption': '',
- 'desc': '',
- 'supported_on': ['chrome.ddd:8-']
- },
- ]
- }, {
- 'name': 'Group2',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [
- {
- 'name': 'Group2Policy3',
- 'type': 'string',
- 'caption': '',
- 'desc': '',
- 'supported_on': ['chrome.eee:8-']
- },
- ]
- }, {
- 'name': 'SinglePolicy',
- 'type': 'int',
- 'caption': '',
- 'desc': '',
- 'supported_on': ['chrome.eee:8-']
- }
- ]
- }
- # This writer accumulates the list of policies it is asked to write.
- # This list is stored in the result_list member variable and can
- # be used later for assertions.
- class LocalMockWriter(mock_writer.MockWriter):
- def __init__(self, platforms):
- self.platforms = platforms
- self.policy_name = None
- self.result_list = []
- def BeginPolicyGroup(self, group):
- self.group = group;
- self.result_list.append('begin_' + group['name'])
- def EndPolicyGroup(self):
- self.result_list.append('end_group')
- self.group = None
- def WritePolicy(self, policy):
- self.result_list.append(policy['name'])
- def IsPolicySupported(self, policy):
- # Call the original (non-mock) implementation of this method.
- return template_writer.TemplateWriter.IsPolicySupported(self, policy)
-
- local_mock_writer = LocalMockWriter(['eee'])
- self.do_test(policy_data_mock, local_mock_writer)
- # Test that only policies of platform 'eee' were written:
- self.assertEquals(
- local_mock_writer.result_list,
- ['begin_Group2', 'Group2Policy3', 'end_group', 'SinglePolicy'])
-
- local_mock_writer = LocalMockWriter(['ddd', 'bbb'])
- self.do_test(policy_data_mock, local_mock_writer)
- # Test that only policies of platforms 'ddd' and 'bbb' were written:
- self.assertEquals(
- local_mock_writer.result_list,
- ['begin_Group1', 'Group1Policy1', 'Group1Policy2', 'end_group'])
-
- def testSortingInvoked(self):
- # Tests that policy-sorting happens before passing policies to the writer.
- policy_data = {
- 'policy_definitions': [
- {'name': 'zp', 'type': 'string', 'supported_on': [],
- 'caption': '', 'desc': ''},
- {'name': 'ap', 'type': 'string', 'supported_on': [],
- 'caption': '', 'desc': ''},
- ]
- }
- class LocalMockWriter(mock_writer.MockWriter):
- def __init__(self):
- self.result_list = []
- def WritePolicy(self, policy):
- self.result_list.append(policy['name'])
- def Test(self):
- self.tester.assertEquals(
- self.result_list,
- ['ap', 'zp'])
- self.do_test(policy_data, LocalMockWriter())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/template_formatter.py b/chromium/tools/grit/grit/format/policy_templates/template_formatter.py
deleted file mode 100755
index 53b84ec0a7d..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/template_formatter.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import sys
-from functools import partial
-
-from grit.format.policy_templates import policy_template_generator
-from grit.format.policy_templates import writer_configuration
-from grit.node import misc
-from grit.node import structure
-
-
-def GetFormatter(type):
- return partial(_TemplateFormatter,
- 'grit.format.policy_templates.writers.%s_writer' % type)
-
-
-def _TemplateFormatter(writer_module_name, root, lang, output_dir):
- '''Creates a template file corresponding to an <output> node of the grit
- tree.
-
- More precisely, processes the whole grit tree for a given <output> node whose
- type is one of adm, plist, plist_strings, admx, adml, doc, json, reg.
- The result of processing is a policy template file with the given type and
- language of the <output> node. This function does the interfacing with
- grit, but the actual template-generating work is done in
- policy_template_generator.PolicyTemplateGenerator.
-
- Args:
- writer_name: A string identifying the TemplateWriter subclass used
- for generating the output.
- root: the <grit> root node of the grit tree.
- lang: the language of outputted text, e.g.: 'en'
- output_dir: The output directory, currently unused here.
-
- Yields the text of the template file.
- '''
- __import__(writer_module_name)
- writer_module = sys.modules[writer_module_name]
- config = writer_configuration.GetConfigurationForBuild(root.defines)
- policy_data = _ParseGritNodes(root, lang)
- policy_generator = \
- policy_template_generator.PolicyTemplateGenerator(config, policy_data)
- writer = writer_module.GetWriter(config)
- yield policy_generator.GetTemplateText(writer)
-
-
-def _ParseGritNodes(root, lang):
- '''Collects the necessary information from the grit tree:
- the message strings and the policy definitions.
-
- Args:
- root: The root of the grit tree.
- lang: the language of outputted text, e.g.: 'en'
-
- Returns:
- Policy data.
- '''
- policy_data = None
- for item in root.ActiveDescendants():
- with item:
- if (isinstance(item, structure.StructureNode) and
- item.attrs['type'] == 'policy_template_metafile'):
- assert policy_data is None
- json_text = item.gatherer.Translate(
- lang,
- pseudo_if_not_available=item.PseudoIsAllowed(),
- fallback_to_english=item.ShouldFallbackToEnglish())
- policy_data = eval(json_text)
- return policy_data
diff --git a/chromium/tools/grit/grit/format/policy_templates/writer_configuration.py b/chromium/tools/grit/grit/format/policy_templates/writer_configuration.py
deleted file mode 100755
index 728a3cf09e4..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writer_configuration.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-def GetConfigurationForBuild(defines):
- '''Returns a configuration dictionary for the given build that contains
- build-specific settings and information.
-
- Args:
- defines: Definitions coming from the build system.
-
- Raises:
- Exception: If 'defines' contains an unknown build-type.
- '''
- # The prefix of key names in config determines which writer will use their
- # corresponding values:
- # win: Both ADM and ADMX.
- # mac: Only plist.
- # admx: Only ADMX.
- # adm: Only ADM.
- # none/other: Used by all the writers.
- if '_chromium' in defines:
- config = {
- 'build': 'chromium',
- 'app_name': 'Chromium',
- 'frame_name': 'Chromium Frame',
- 'os_name': 'Chromium OS',
- 'webview_name': 'Chromium WebView',
- 'win_reg_mandatory_key_name': 'Software\\Policies\\Chromium',
- 'win_reg_recommended_key_name':
- 'Software\\Policies\\Chromium\\Recommended',
- 'win_mandatory_category_path': ['chromium'],
- 'win_recommended_category_path': ['chromium_recommended'],
- 'win_category_path_strings': {
- 'chromium': 'Chromium',
- 'chromium_recommended': 'Chromium - {doc_recommended}'
- },
- 'admx_namespace': 'Chromium.Policies.Chromium',
- 'admx_prefix': 'chromium',
- 'linux_policy_path': '/etc/chromium/policies/',
- }
- elif '_google_chrome' in defines:
- config = {
- 'build': 'chrome',
- 'app_name': 'Google Chrome',
- 'frame_name': 'Google Chrome Frame',
- 'os_name': 'Google Chrome OS',
- 'webview_name': 'Android System WebView',
- 'win_reg_mandatory_key_name': 'Software\\Policies\\Google\\Chrome',
- 'win_reg_recommended_key_name':
- 'Software\\Policies\\Google\\Chrome\\Recommended',
- # Note: Google:Cat_Google references Google.Policies from external
- # in google.admx file.
- 'win_mandatory_category_path': ['Google:Cat_Google', 'googlechrome'],
- 'win_recommended_category_path':
- ['Google:Cat_Google', 'googlechrome_recommended'],
- 'win_category_path_strings': {
- # Strings in curly braces is looked up from localized 'messages' in
- # policy_templates.json.
- 'googlechrome': 'Google Chrome',
- 'googlechrome_recommended': 'Google Chrome - {doc_recommended}'
- },
- # The string 'Google' is defined in google.adml for ADMX, but ADM doesn't
- # support external references, so we define this map here.
- 'adm_category_path_strings': { 'Google:Cat_Google': 'Google' },
- 'admx_namespace': 'Google.Policies.Chrome',
- 'admx_prefix': 'chrome',
- 'admx_using_namespaces': {
- 'Google': 'Google.Policies' # prefix: namespace
- },
- 'linux_policy_path': '/etc/opt/chrome/policies/',
- }
- else:
- raise Exception('Unknown build')
- if 'version' in defines:
- config['version'] = defines['version']
- config['win_group_policy_class'] = 'Both'
- config['win_supported_os'] = 'SUPPORTED_WINXPSP2'
- if 'mac_bundle_id' in defines:
- config['mac_bundle_id'] = defines['mac_bundle_id']
- config['android_webview_restriction_prefix'] = 'com.android.browser:'
- return config
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/__init__.py b/chromium/tools/grit/grit/format/policy_templates/writers/__init__.py
deleted file mode 100755
index fe6d1395609..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Module grit.format.policy_templates.writers
-'''
-
-pass
-
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/adm_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/adm_writer.py
deleted file mode 100755
index 26a2a6055c0..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/adm_writer.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from grit.format.policy_templates.writers import template_writer
-import re
-
-NEWLINE = '\r\n'
-
-
-def GetWriter(config):
- '''Factory method for creating AdmWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return AdmWriter(['win'], config)
-
-
-class IndentedStringBuilder:
- '''Utility class for building text with indented lines.'''
-
- def __init__(self):
- self.lines = []
- self.indent = ''
-
- def AddLine(self, string='', indent_diff=0):
- '''Appends a string with indentation and a linebreak to |self.lines|.
-
- Args:
- string: The string to print.
- indent_diff: the difference of indentation of the printed line,
- compared to the next/previous printed line. Increment occurs
- after printing the line, while decrement occurs before that.
- '''
- indent_diff *= 2
- if indent_diff < 0:
- self.indent = self.indent[(-indent_diff):]
- if string != '':
- self.lines.append(self.indent + string)
- else:
- self.lines.append('')
- if indent_diff > 0:
- self.indent += ''.ljust(indent_diff)
-
- def AddLines(self, other):
- '''Appends the content of another |IndentedStringBuilder| to |self.lines|.
- Indentation of the added lines will be the sum of |self.indent| and
- their original indentation.
-
- Args:
- other: The buffer from which lines are copied.
- '''
- for line in other.lines:
- self.AddLine(line)
-
- def ToString(self):
- '''Returns |self.lines| as text string.'''
- return NEWLINE.join(self.lines)
-
-
-class AdmWriter(template_writer.TemplateWriter):
- '''Class for generating policy templates in Windows ADM format.
- It is used by PolicyTemplateGenerator to write ADM files.
- '''
-
- TYPE_TO_INPUT = {
- 'string': 'EDITTEXT',
- 'int': 'NUMERIC',
- 'string-enum': 'DROPDOWNLIST',
- 'int-enum': 'DROPDOWNLIST',
- 'list': 'LISTBOX',
- 'string-enum-list': 'LISTBOX',
- 'dict': 'EDITTEXT'
- }
-
- def _Escape(self, string):
- return string.replace('.', '_')
-
- def _AddGuiString(self, name, value):
- # The |name| must be escaped.
- assert name == self._Escape(name)
- # Escape newlines in the value.
- value = value.replace('\n', '\\n')
- if name in self.strings_seen:
- err = ('%s was added as "%s" and now added again as "%s"' %
- (name, self.strings_seen[name], value))
- assert value == self.strings_seen[name], err
- else:
- self.strings_seen[name] = value
- line = '%s="%s"' % (name, value)
- self.strings.AddLine(line)
-
- def _WriteSupported(self, builder):
- builder.AddLine('#if version >= 4', 1)
- builder.AddLine('SUPPORTED !!SUPPORTED_WINXPSP2')
- builder.AddLine('#endif', -1)
-
- def _WritePart(self, policy, key_name, builder):
- '''Writes the PART ... END PART section of a policy.
-
- Args:
- policy: The policy to write to the output.
- key_name: The registry key backing the policy.
- builder: Builder to append lines to.
- '''
- policy_part_name = self._Escape(policy['name'] + '_Part')
- self._AddGuiString(policy_part_name, policy['label'])
-
- # Print the PART ... END PART section:
- builder.AddLine()
- adm_type = self.TYPE_TO_INPUT[policy['type']]
- builder.AddLine('PART !!%s %s' % (policy_part_name, adm_type), 1)
- if policy['type'] in ('list', 'string-enum-list'):
- # Note that the following line causes FullArmor ADMX Migrator to create
- # corrupt ADMX files. Please use admx_writer to get ADMX files.
- builder.AddLine('KEYNAME "%s\\%s"' % (key_name, policy['name']))
- builder.AddLine('VALUEPREFIX ""')
- else:
- builder.AddLine('VALUENAME "%s"' % policy['name'])
- if policy['type'] == 'int':
- # The default max for NUMERIC values is 9999 which is too small for us.
- builder.AddLine('MIN 0 MAX 2000000000')
- if policy['type'] in ('string', 'dict'):
- # The default max for EDITTEXT values is 1023, which is too small for
- # big JSON blobs and other string policies.
- builder.AddLine('MAXLEN 1000000')
- if policy['type'] in ('int-enum', 'string-enum'):
- builder.AddLine('ITEMLIST', 1)
- for item in policy['items']:
- if policy['type'] == 'int-enum':
- value_text = 'NUMERIC ' + str(item['value'])
- else:
- value_text = '"' + item['value'] + '"'
- string_id = self._Escape(item['name'] + '_DropDown')
- builder.AddLine('NAME !!%s VALUE %s' % (string_id, value_text))
- self._AddGuiString(string_id, item['caption'])
- builder.AddLine('END ITEMLIST', -1)
- builder.AddLine('END PART', -1)
-
- def _WritePolicy(self, policy, key_name, builder):
- if policy['type'] == 'external':
- # This type can only be set through cloud policy.
- return
-
- policy_name = self._Escape(policy['name'] + '_Policy')
- self._AddGuiString(policy_name, policy['caption'])
- builder.AddLine('POLICY !!%s' % policy_name, 1)
- self._WriteSupported(builder)
- policy_explain_name = self._Escape(policy['name'] + '_Explain')
- self._AddGuiString(policy_explain_name, policy['desc'])
- builder.AddLine('EXPLAIN !!' + policy_explain_name)
-
- if policy['type'] == 'main':
- builder.AddLine('VALUENAME "%s"' % policy['name'])
- builder.AddLine('VALUEON NUMERIC 1')
- builder.AddLine('VALUEOFF NUMERIC 0')
- else:
- self._WritePart(policy, key_name, builder)
-
- builder.AddLine('END POLICY', -1)
- builder.AddLine()
-
- def WriteComment(self, comment):
- self.lines.AddLine('; ' + comment)
-
- def WritePolicy(self, policy):
- if self.CanBeMandatory(policy):
- self._WritePolicy(policy,
- self.config['win_reg_mandatory_key_name'],
- self.policies)
-
- def WriteRecommendedPolicy(self, policy):
- self._WritePolicy(policy,
- self.config['win_reg_recommended_key_name'],
- self.recommended_policies)
-
- def BeginPolicyGroup(self, group):
- category_name = self._Escape(group['name'] + '_Category')
- self._AddGuiString(category_name, group['caption'])
- self.policies.AddLine('CATEGORY !!' + category_name, 1)
-
- def EndPolicyGroup(self):
- self.policies.AddLine('END CATEGORY', -1)
- self.policies.AddLine('')
-
- def BeginRecommendedPolicyGroup(self, group):
- category_name = self._Escape(group['name'] + '_Category')
- self._AddGuiString(category_name, group['caption'])
- self.recommended_policies.AddLine('CATEGORY !!' + category_name, 1)
-
- def EndRecommendedPolicyGroup(self):
- self.recommended_policies.AddLine('END CATEGORY', -1)
- self.recommended_policies.AddLine('')
-
- def _CreateTemplate(self, category_path, key_name, policies):
- '''Creates the whole ADM template except for the [Strings] section, and
- returns it as an |IndentedStringBuilder|.
-
- Args:
- category_path: List of strings representing the category path.
- key_name: Main registry key backing the policies.
- policies: ADM code for all the policies in an |IndentedStringBuilder|.
- '''
- lines = IndentedStringBuilder()
- for part in category_path:
- lines.AddLine('CATEGORY !!' + part, 1)
- lines.AddLine('KEYNAME "%s"' % key_name)
- lines.AddLine()
-
- lines.AddLines(policies)
-
- for part in category_path:
- lines.AddLine('END CATEGORY', -1)
- lines.AddLine()
-
- return lines
-
- def BeginTemplate(self):
- if self._GetChromiumVersionString() is not None:
- self.WriteComment(self.config['build'] + ' version: ' + \
- self._GetChromiumVersionString())
- self._AddGuiString(self.config['win_supported_os'],
- self.messages['win_supported_winxpsp2']['text'])
- categories = self.config['win_mandatory_category_path'] + \
- self.config['win_recommended_category_path']
- strings = self.config['win_category_path_strings'].copy()
- if 'adm_category_path_strings' in self.config:
- strings.update(self.config['adm_category_path_strings'])
- for category in categories:
- if (category in strings):
- # Replace {...} by localized messages.
- string = re.sub(r"\{(\w+)\}", \
- lambda m: self.messages[m.group(1)]['text'], \
- strings[category])
- self._AddGuiString(category, string)
- # All the policies will be written into self.policies.
- # The final template text will be assembled into self.lines by
- # self.EndTemplate().
-
- def EndTemplate(self):
- # Copy policies into self.lines.
- policy_class = self.config['win_group_policy_class'].upper()
- for class_name in ['MACHINE', 'USER']:
- if policy_class != 'BOTH' and policy_class != class_name:
- continue
- self.lines.AddLine('CLASS ' + class_name, 1)
- self.lines.AddLines(self._CreateTemplate(
- self.config['win_mandatory_category_path'],
- self.config['win_reg_mandatory_key_name'],
- self.policies))
- self.lines.AddLines(self._CreateTemplate(
- self.config['win_recommended_category_path'],
- self.config['win_reg_recommended_key_name'],
- self.recommended_policies))
- self.lines.AddLine('', -1)
- # Copy user strings into self.lines.
- self.lines.AddLine('[Strings]')
- self.lines.AddLines(self.strings)
-
- def Init(self):
- # String buffer for building the whole ADM file.
- self.lines = IndentedStringBuilder()
- # String buffer for building the strings section of the ADM file.
- self.strings = IndentedStringBuilder()
- # Map of strings seen, to avoid duplicates.
- self.strings_seen = {}
- # String buffer for building the policies of the ADM file.
- self.policies = IndentedStringBuilder()
- # String buffer for building the recommended policies of the ADM file.
- self.recommended_policies = IndentedStringBuilder()
-
- def GetTemplateText(self):
- return self.lines.ToString()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/adm_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/adm_writer_unittest.py
deleted file mode 100755
index ead22903a49..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/adm_writer_unittest.py
+++ /dev/null
@@ -1,1127 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.adm_writer'''
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-
-from grit.format.policy_templates.writers import writer_unittest_common
-
-
-class AdmWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests for AdmWriter.'''
-
- def ConstructOutput(self, classes, body, strings):
- result = []
- for clazz in classes:
- result.append('CLASS ' + clazz)
- result.append(body)
- result.append(strings)
- return ''.join(result)
-
- def CompareOutputs(self, output, expected_output):
- '''Compares the output of the adm_writer with its expected output.
-
- Args:
- output: The output of the adm writer as returned by grit.
- expected_output: The expected output.
-
- Raises:
- AssertionError: if the two strings are not equivalent.
- '''
- self.assertEquals(
- output.strip(),
- expected_output.strip().replace('\n', '\r\n'))
-
- def testEmpty(self):
- # Test PListWriter in case of empty polices.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least "Windows 3.11', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium': '1',}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least "Windows 3.11"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"''')
- self.CompareOutputs(output, expected_output)
-
- def testVersionAnnotation(self):
- # Test PListWriter in case of empty polices.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least "Windows 3.11', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(
- grd, 'fr', {'_chromium': '1', 'version':'39.0.0.0'}, 'adm', 'en')
- expected_output = '; chromium version: 39.0.0.0\n' + \
- self.ConstructOutput(['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least "Windows 3.11"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"''')
- self.CompareOutputs(output, expected_output)
-
- def testMainPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainPolicy',
- 'type': 'main',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- 'caption': 'Caption of main.',
- 'desc': 'Description of main.',
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.12', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome
- KEYNAME "Software\\Policies\\Google\\Chrome"
-
- POLICY !!MainPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!MainPolicy_Explain
- VALUENAME "MainPolicy"
- VALUEON NUMERIC 1
- VALUEOFF NUMERIC 0
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome_recommended
- KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
-
- POLICY !!MainPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!MainPolicy_Explain
- VALUENAME "MainPolicy"
- VALUEON NUMERIC 1
- VALUEOFF NUMERIC 0
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.12"
-Google:Cat_Google="Google"
-googlechrome="Google Chrome"
-googlechrome_recommended="Google Chrome - Recommended"
-MainPolicy_Policy="Caption of main."
-MainPolicy_Explain="Description of main."''')
- self.CompareOutputs(output, expected_output)
-
- def testMainPolicyRecommendedOnly(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainPolicy',
- 'type': 'main',
- 'supported_on': ['chrome.win:8-'],
- 'features': {
- 'can_be_recommended': True,
- 'can_be_mandatory': False
- },
- 'caption': 'Caption of main.',
- 'desc': 'Description of main.',
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.12', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome
- KEYNAME "Software\\Policies\\Google\\Chrome"
-
- END CATEGORY
- END CATEGORY
-
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome_recommended
- KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
-
- POLICY !!MainPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!MainPolicy_Explain
- VALUENAME "MainPolicy"
- VALUEON NUMERIC 1
- VALUEOFF NUMERIC 0
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.12"
-Google:Cat_Google="Google"
-googlechrome="Google Chrome"
-googlechrome_recommended="Google Chrome - Recommended"
-MainPolicy_Policy="Caption of main."
-MainPolicy_Explain="Description of main."''')
- self.CompareOutputs(output, expected_output)
-
- def testStringPolicy(self):
- # Tests a policy group with a single policy of type 'string'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'StringPolicy',
- 'type': 'string',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- 'desc': """Description of group.
-With a newline.""",
- 'caption': 'Caption of policy.',
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.13', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- POLICY !!StringPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!StringPolicy_Explain
-
- PART !!StringPolicy_Part EDITTEXT
- VALUENAME "StringPolicy"
- MAXLEN 1000000
- END PART
- END POLICY
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- POLICY !!StringPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!StringPolicy_Explain
-
- PART !!StringPolicy_Part EDITTEXT
- VALUENAME "StringPolicy"
- MAXLEN 1000000
- END PART
- END POLICY
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.13"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-StringPolicy_Policy="Caption of policy."
-StringPolicy_Explain="Description of group.\\nWith a newline."
-StringPolicy_Part="Caption of policy."
-''')
- self.CompareOutputs(output, expected_output)
-
- def testIntPolicy(self):
- # Tests a policy group with a single policy of type 'string'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'IntPolicy',
- 'type': 'int',
- 'caption': 'Caption of policy.',
- 'features': { 'can_be_recommended': True },
- 'desc': 'Description of policy.',
- 'supported_on': ['chrome.win:8-']
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.13', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- POLICY !!IntPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!IntPolicy_Explain
-
- PART !!IntPolicy_Part NUMERIC
- VALUENAME "IntPolicy"
- MIN 0 MAX 2000000000
- END PART
- END POLICY
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- POLICY !!IntPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!IntPolicy_Explain
-
- PART !!IntPolicy_Part NUMERIC
- VALUENAME "IntPolicy"
- MIN 0 MAX 2000000000
- END PART
- END POLICY
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.13"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-IntPolicy_Policy="Caption of policy."
-IntPolicy_Explain="Description of policy."
-IntPolicy_Part="Caption of policy."
-''')
- self.CompareOutputs(output, expected_output)
-
- def testIntEnumPolicy(self):
- # Tests a policy group with a single policy of type 'int-enum'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumPolicy',
- 'type': 'int-enum',
- 'items': [
- {
- 'name': 'ProxyServerDisabled',
- 'value': 0,
- 'caption': 'Option1',
- },
- {
- 'name': 'ProxyServerAutoDetect',
- 'value': 1,
- 'caption': 'Option2',
- },
- ],
- 'desc': 'Description of policy.',
- 'caption': 'Caption of policy.',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.14', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome
- KEYNAME "Software\\Policies\\Google\\Chrome"
-
- POLICY !!EnumPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!EnumPolicy_Explain
-
- PART !!EnumPolicy_Part DROPDOWNLIST
- VALUENAME "EnumPolicy"
- ITEMLIST
- NAME !!ProxyServerDisabled_DropDown VALUE NUMERIC 0
- NAME !!ProxyServerAutoDetect_DropDown VALUE NUMERIC 1
- END ITEMLIST
- END PART
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome_recommended
- KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
-
- POLICY !!EnumPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!EnumPolicy_Explain
-
- PART !!EnumPolicy_Part DROPDOWNLIST
- VALUENAME "EnumPolicy"
- ITEMLIST
- NAME !!ProxyServerDisabled_DropDown VALUE NUMERIC 0
- NAME !!ProxyServerAutoDetect_DropDown VALUE NUMERIC 1
- END ITEMLIST
- END PART
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.14"
-Google:Cat_Google="Google"
-googlechrome="Google Chrome"
-googlechrome_recommended="Google Chrome - Recommended"
-EnumPolicy_Policy="Caption of policy."
-EnumPolicy_Explain="Description of policy."
-EnumPolicy_Part="Caption of policy."
-ProxyServerDisabled_DropDown="Option1"
-ProxyServerAutoDetect_DropDown="Option2"
-''')
- self.CompareOutputs(output, expected_output)
-
- def testStringEnumPolicy(self):
- # Tests a policy group with a single policy of type 'int-enum'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumPolicy',
- 'type': 'string-enum',
- 'caption': 'Caption of policy.',
- 'desc': 'Description of policy.',
- 'items': [
- {'name': 'ProxyServerDisabled', 'value': 'one',
- 'caption': 'Option1'},
- {'name': 'ProxyServerAutoDetect', 'value': 'two',
- 'caption': 'Option2'},
- ],
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.14', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome
- KEYNAME "Software\\Policies\\Google\\Chrome"
-
- POLICY !!EnumPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!EnumPolicy_Explain
-
- PART !!EnumPolicy_Part DROPDOWNLIST
- VALUENAME "EnumPolicy"
- ITEMLIST
- NAME !!ProxyServerDisabled_DropDown VALUE "one"
- NAME !!ProxyServerAutoDetect_DropDown VALUE "two"
- END ITEMLIST
- END PART
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome_recommended
- KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
-
- POLICY !!EnumPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!EnumPolicy_Explain
-
- PART !!EnumPolicy_Part DROPDOWNLIST
- VALUENAME "EnumPolicy"
- ITEMLIST
- NAME !!ProxyServerDisabled_DropDown VALUE "one"
- NAME !!ProxyServerAutoDetect_DropDown VALUE "two"
- END ITEMLIST
- END PART
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.14"
-Google:Cat_Google="Google"
-googlechrome="Google Chrome"
-googlechrome_recommended="Google Chrome - Recommended"
-EnumPolicy_Policy="Caption of policy."
-EnumPolicy_Explain="Description of policy."
-EnumPolicy_Part="Caption of policy."
-ProxyServerDisabled_DropDown="Option1"
-ProxyServerAutoDetect_DropDown="Option2"
-''')
- self.CompareOutputs(output, expected_output)
-
- def testListPolicy(self):
- # Tests a policy group with a single policy of type 'list'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'ListPolicy',
- 'type': 'list',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- 'desc': """Description of list policy.
-With a newline.""",
- 'caption': 'Caption of list policy.',
- 'label': 'Label of list policy.'
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.15', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- },
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- POLICY !!ListPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!ListPolicy_Explain
-
- PART !!ListPolicy_Part LISTBOX
- KEYNAME "Software\\Policies\\Chromium\\ListPolicy"
- VALUEPREFIX ""
- END PART
- END POLICY
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- POLICY !!ListPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!ListPolicy_Explain
-
- PART !!ListPolicy_Part LISTBOX
- KEYNAME "Software\\Policies\\Chromium\\Recommended\\ListPolicy"
- VALUEPREFIX ""
- END PART
- END POLICY
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.15"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-ListPolicy_Policy="Caption of list policy."
-ListPolicy_Explain="Description of list policy.\\nWith a newline."
-ListPolicy_Part="Label of list policy."
-''')
- self.CompareOutputs(output, expected_output)
-
- def testStringEnumListPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum-list'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'ListPolicy',
- 'type': 'string-enum-list',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- 'desc': """Description of list policy.
-With a newline.""",
- 'items': [
- {'name': 'ProxyServerDisabled', 'value': 'one',
- 'caption': 'Option1'},
- {'name': 'ProxyServerAutoDetect', 'value': 'two',
- 'caption': 'Option2'},
- ],
- 'caption': 'Caption of list policy.',
- 'label': 'Label of list policy.'
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.15', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- },
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- POLICY !!ListPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!ListPolicy_Explain
-
- PART !!ListPolicy_Part LISTBOX
- KEYNAME "Software\\Policies\\Chromium\\ListPolicy"
- VALUEPREFIX ""
- END PART
- END POLICY
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- POLICY !!ListPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!ListPolicy_Explain
-
- PART !!ListPolicy_Part LISTBOX
- KEYNAME "Software\\Policies\\Chromium\\Recommended\\ListPolicy"
- VALUEPREFIX ""
- END PART
- END POLICY
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.15"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-ListPolicy_Policy="Caption of list policy."
-ListPolicy_Explain="Description of list policy.\\nWith a newline."
-ListPolicy_Part="Label of list policy."
-''')
- self.CompareOutputs(output, expected_output)
-
- def testDictionaryPolicy(self):
- # Tests a policy group with a single policy of type 'dict'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'DictionaryPolicy',
- 'type': 'dict',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- 'desc': 'Description of group.',
- 'caption': 'Caption of policy.',
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.13', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- POLICY !!DictionaryPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!DictionaryPolicy_Explain
-
- PART !!DictionaryPolicy_Part EDITTEXT
- VALUENAME "DictionaryPolicy"
- MAXLEN 1000000
- END PART
- END POLICY
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- POLICY !!DictionaryPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!DictionaryPolicy_Explain
-
- PART !!DictionaryPolicy_Part EDITTEXT
- VALUENAME "DictionaryPolicy"
- MAXLEN 1000000
- END PART
- END POLICY
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.13"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-DictionaryPolicy_Policy="Caption of policy."
-DictionaryPolicy_Explain="Description of group."
-DictionaryPolicy_Part="Caption of policy."
-''')
- self.CompareOutputs(output, expected_output)
-
- def testNonSupportedPolicy(self):
- # Tests a policy that is not supported on Windows, so it shouldn't
- # be included in the ADM file.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'NonWinGroup',
- 'type': 'group',
- 'policies': [{
- 'name': 'NonWinPolicy',
- 'type': 'list',
- 'supported_on': ['chrome.linux:8-', 'chrome.mac:8-'],
- 'caption': 'Caption of list policy.',
- 'desc': 'Desc of list policy.',
- }],
- 'caption': 'Group caption.',
- 'desc': 'Group description.',
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.16', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.16"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-''')
- self.CompareOutputs(output, expected_output)
-
- def testNonRecommendedPolicy(self):
- # Tests a policy that is not recommended, so it should be included.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainPolicy',
- 'type': 'main',
- 'supported_on': ['chrome.win:8-'],
- 'caption': 'Caption of main.',
- 'desc': 'Description of main.',
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.12', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome
- KEYNAME "Software\\Policies\\Google\\Chrome"
-
- POLICY !!MainPolicy_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!MainPolicy_Explain
- VALUENAME "MainPolicy"
- VALUEON NUMERIC 1
- VALUEOFF NUMERIC 0
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome_recommended
- KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
-
- END CATEGORY
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.12"
-Google:Cat_Google="Google"
-googlechrome="Google Chrome"
-googlechrome_recommended="Google Chrome - Recommended"
-MainPolicy_Policy="Caption of main."
-MainPolicy_Explain="Description of main."''')
- self.CompareOutputs(output, expected_output)
-
- def testPolicyGroup(self):
- # Tests a policy group that has more than one policies.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'Group1',
- 'type': 'group',
- 'desc': 'Description of group.',
- 'caption': 'Caption of group.',
- 'policies': [{
- 'name': 'Policy1',
- 'type': 'list',
- 'supported_on': ['chrome.win:8-'],
- 'features': { 'can_be_recommended': True },
- 'caption': 'Caption of policy1.',
- 'desc': """Description of policy1.
-With a newline."""
- },{
- 'name': 'Policy2',
- 'type': 'string',
- 'supported_on': ['chrome.win:8-'],
- 'caption': 'Caption of policy2.',
- 'desc': """Description of policy2.
-With a newline."""
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.16', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!chromium
- KEYNAME "Software\\Policies\\Chromium"
-
- CATEGORY !!Group1_Category
- POLICY !!Policy1_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!Policy1_Explain
-
- PART !!Policy1_Part LISTBOX
- KEYNAME "Software\\Policies\\Chromium\\Policy1"
- VALUEPREFIX ""
- END PART
- END POLICY
-
- POLICY !!Policy2_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!Policy2_Explain
-
- PART !!Policy2_Part EDITTEXT
- VALUENAME "Policy2"
- MAXLEN 1000000
- END PART
- END POLICY
-
- END CATEGORY
-
- END CATEGORY
-
- CATEGORY !!chromium_recommended
- KEYNAME "Software\\Policies\\Chromium\\Recommended"
-
- CATEGORY !!Group1_Category
- POLICY !!Policy1_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!Policy1_Explain
-
- PART !!Policy1_Part LISTBOX
- KEYNAME "Software\\Policies\\Chromium\\Recommended\\Policy1"
- VALUEPREFIX ""
- END PART
- END POLICY
-
- END CATEGORY
-
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.16"
-chromium="Chromium"
-chromium_recommended="Chromium - Recommended"
-Group1_Category="Caption of group."
-Policy1_Policy="Caption of policy1."
-Policy1_Explain="Description of policy1.\\nWith a newline."
-Policy1_Part="Caption of policy1."
-Policy2_Policy="Caption of policy2."
-Policy2_Explain="Description of policy2.\\nWith a newline."
-Policy2_Part="Caption of policy2."
-''')
- self.CompareOutputs(output, expected_output)
-
- def testDuplicatedStringEnumPolicy(self):
- # Verifies that duplicated enum constants get merged, and that
- # string constants get escaped.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumPolicy.A',
- 'type': 'string-enum',
- 'caption': 'Caption of policy A.',
- 'desc': 'Description of policy A.',
- 'items': [
- {'name': 'tls1.2', 'value': 'tls1.2', 'caption': 'tls1.2' },
- ],
- 'supported_on': ['chrome.win:39-'],
- },
- {
- 'name': 'EnumPolicy.B',
- 'type': 'string-enum',
- 'caption': 'Caption of policy B.',
- 'desc': 'Description of policy B.',
- 'items': [
- {'name': 'tls1.2', 'value': 'tls1.2', 'caption': 'tls1.2' },
- ],
- 'supported_on': ['chrome.win:39-'],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'win_supported_winxpsp2': {
- 'text': 'At least Windows 3.14', 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended', 'desc': 'bleh'
- }
- }
- }''')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'adm', 'en')
- expected_output = self.ConstructOutput(
- ['MACHINE', 'USER'], '''
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome
- KEYNAME "Software\\Policies\\Google\\Chrome"
-
- POLICY !!EnumPolicy_A_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!EnumPolicy_A_Explain
-
- PART !!EnumPolicy_A_Part DROPDOWNLIST
- VALUENAME "EnumPolicy.A"
- ITEMLIST
- NAME !!tls1_2_DropDown VALUE "tls1.2"
- END ITEMLIST
- END PART
- END POLICY
-
- POLICY !!EnumPolicy_B_Policy
- #if version >= 4
- SUPPORTED !!SUPPORTED_WINXPSP2
- #endif
- EXPLAIN !!EnumPolicy_B_Explain
-
- PART !!EnumPolicy_B_Part DROPDOWNLIST
- VALUENAME "EnumPolicy.B"
- ITEMLIST
- NAME !!tls1_2_DropDown VALUE "tls1.2"
- END ITEMLIST
- END PART
- END POLICY
-
- END CATEGORY
- END CATEGORY
-
- CATEGORY !!Google:Cat_Google
- CATEGORY !!googlechrome_recommended
- KEYNAME "Software\\Policies\\Google\\Chrome\\Recommended"
-
- END CATEGORY
- END CATEGORY
-
-
-''', '''[Strings]
-SUPPORTED_WINXPSP2="At least Windows 3.14"
-Google:Cat_Google="Google"
-googlechrome="Google Chrome"
-googlechrome_recommended="Google Chrome - Recommended"
-EnumPolicy_A_Policy="Caption of policy A."
-EnumPolicy_A_Explain="Description of policy A."
-EnumPolicy_A_Part="Caption of policy A."
-tls1_2_DropDown="tls1.2"
-EnumPolicy_B_Policy="Caption of policy B."
-EnumPolicy_B_Explain="Description of policy B."
-EnumPolicy_B_Part="Caption of policy B."
-''')
- self.CompareOutputs(output, expected_output)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/adml_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/adml_writer.py
deleted file mode 100755
index bb35f738379..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/adml_writer.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from xml.dom import minidom
-from grit.format.policy_templates.writers import xml_formatted_writer
-import re
-
-
-def GetWriter(config):
- '''Factory method for instanciating the ADMLWriter. Every Writer needs a
- GetWriter method because the TemplateFormatter uses this method to
- instantiate a Writer.
- '''
- return ADMLWriter(['win'], config)
-
-
-class ADMLWriter(xml_formatted_writer.XMLFormattedWriter):
- ''' Class for generating an ADML policy template. It is used by the
- PolicyTemplateGenerator to write the ADML file.
- '''
-
- # DOM root node of the generated ADML document.
- _doc = None
-
- # The string-table contains all ADML "string" elements.
- _string_table_elem = None
-
- # The presentation-table is the container for presentation elements, that
- # describe the presentation of Policy-Groups and Policies.
- _presentation_table_elem = None
-
- def _AddString(self, id, text):
- ''' Adds an ADML "string" element to _string_table_elem. The following
- ADML snippet contains an example:
-
- <string id="$(id)">$(text)</string>
-
- Args:
- id: ID of the newly created "string" element.
- text: Value of the newly created "string" element.
- '''
- id = id.replace('.', '_')
- if id in self.strings_seen:
- assert text == self.strings_seen[id]
- else:
- self.strings_seen[id] = text
- string_elem = self.AddElement(
- self._string_table_elem, 'string', {'id': id})
- string_elem.appendChild(self._doc.createTextNode(text))
-
- def WritePolicy(self, policy):
- '''Generates the ADML elements for a Policy.
- <stringTable>
- ...
- <string id="$(policy_group_name)">$(caption)</string>
- <string id="$(policy_group_name)_Explain">$(description)</string>
- </stringTable>
-
- <presentationTables>
- ...
- <presentation id=$(policy_group_name)/>
- </presentationTables>
-
- Args:
- policy: The Policy to generate ADML elements for.
- '''
- policy_type = policy['type']
- policy_name = policy['name']
- if 'caption' in policy:
- policy_caption = policy['caption']
- else:
- policy_caption = policy_name
- if 'desc' in policy:
- policy_description = policy['desc']
- else:
- policy_description = policy_name
- if 'label' in policy:
- policy_label = policy['label']
- else:
- policy_label = policy_name
-
- self._AddString(policy_name, policy_caption)
- self._AddString(policy_name + '_Explain', policy_description)
- presentation_elem = self.AddElement(
- self._presentation_table_elem, 'presentation', {'id': policy_name})
-
- if policy_type == 'main':
- pass
- elif policy_type in ('string', 'dict'):
- # 'dict' policies are configured as JSON-encoded strings on Windows.
- textbox_elem = self.AddElement(presentation_elem, 'textBox',
- {'refId': policy_name})
- label_elem = self.AddElement(textbox_elem, 'label')
- label_elem.appendChild(self._doc.createTextNode(policy_label))
- elif policy_type == 'int':
- textbox_elem = self.AddElement(presentation_elem, 'decimalTextBox',
- {'refId': policy_name})
- textbox_elem.appendChild(self._doc.createTextNode(policy_label + ':'))
- elif policy_type in ('int-enum', 'string-enum'):
- for item in policy['items']:
- self._AddString(item['name'], item['caption'])
- dropdownlist_elem = self.AddElement(presentation_elem, 'dropdownList',
- {'refId': policy_name})
- dropdownlist_elem.appendChild(self._doc.createTextNode(policy_label))
- elif policy_type in ('list', 'string-enum-list'):
- self._AddString(policy_name + 'Desc', policy_caption)
- listbox_elem = self.AddElement(presentation_elem, 'listBox',
- {'refId': policy_name + 'Desc'})
- listbox_elem.appendChild(self._doc.createTextNode(policy_label))
- elif policy_type == 'group':
- pass
- elif policy_type == 'external':
- # This type can only be set through cloud policy.
- pass
- else:
- raise Exception('Unknown policy type %s.' % policy_type)
-
- def BeginPolicyGroup(self, group):
- '''Generates ADML elements for a Policy-Group. For each Policy-Group two
- ADML "string" elements are added to the string-table. One contains the
- caption of the Policy-Group and the other a description. A Policy-Group also
- requires an ADML "presentation" element that must be added to the
- presentation-table. The "presentation" element is the container for the
- elements that define the visual presentation of the Policy-Goup's Policies.
- The following ADML snippet shows an example:
-
- Args:
- group: The Policy-Group to generate ADML elements for.
- '''
- # Add ADML "string" elements to the string-table that are required by a
- # Policy-Group.
- self._AddString(group['name'] + '_group', group['caption'])
-
- def _AddBaseStrings(self):
- ''' Adds ADML "string" elements to the string-table that are referenced by
- the ADMX file but not related to any specific Policy-Group or Policy.
- '''
- self._AddString(self.config['win_supported_os'],
- self.messages['win_supported_winxpsp2']['text'])
- categories = self.config['win_mandatory_category_path'] + \
- self.config['win_recommended_category_path']
- strings = self.config['win_category_path_strings']
- for category in categories:
- if (category in strings):
- # Replace {...} by localized messages.
- string = re.sub(r"\{(\w+)\}", \
- lambda m: self.messages[m.group(1)]['text'], \
- strings[category])
- self._AddString(category, string)
-
- def BeginTemplate(self):
- dom_impl = minidom.getDOMImplementation('')
- self._doc = dom_impl.createDocument(None, 'policyDefinitionResources',
- None)
- if self._GetChromiumVersionString() is not None:
- self.AddComment(self._doc.documentElement, self.config['build'] + \
- ' version: ' + self._GetChromiumVersionString())
- policy_definitions_resources_elem = self._doc.documentElement
- policy_definitions_resources_elem.attributes['revision'] = '1.0'
- policy_definitions_resources_elem.attributes['schemaVersion'] = '1.0'
-
- self.AddElement(policy_definitions_resources_elem, 'displayName')
- self.AddElement(policy_definitions_resources_elem, 'description')
- resources_elem = self.AddElement(policy_definitions_resources_elem,
- 'resources')
- self._string_table_elem = self.AddElement(resources_elem, 'stringTable')
- self._AddBaseStrings()
- self._presentation_table_elem = self.AddElement(resources_elem,
- 'presentationTable')
-
- def Init(self):
- # Map of all strings seen.
- self.strings_seen = {}
-
- def GetTemplateText(self):
- # Using "toprettyxml()" confuses the Windows Group Policy Editor
- # (gpedit.msc) because it interprets whitespace characters in text between
- # the "string" tags. This prevents gpedit.msc from displaying the category
- # names correctly.
- # TODO(markusheintz): Find a better formatting that works with gpedit.
- return self._doc.toxml()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/adml_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/adml_writer_unittest.py
deleted file mode 100755
index e22d92075ae..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/adml_writer_unittest.py
+++ /dev/null
@@ -1,450 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-"""Unittests for grit.format.policy_templates.writers.adml_writer."""
-
-
-import os
-import sys
-import unittest
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-
-from grit.format.policy_templates.writers import adml_writer
-from grit.format.policy_templates.writers import xml_writer_base_unittest
-
-
-class AdmlWriterUnittest(xml_writer_base_unittest.XmlWriterBaseTest):
-
- def setUp(self):
- config = {
- 'app_name': 'test',
- 'build': 'test',
- 'win_supported_os': 'SUPPORTED_TESTOS',
- 'win_mandatory_category_path': ['test_category'],
- 'win_recommended_category_path': ['test_recommended_category'],
- 'win_category_path_strings': {
- 'test_category': 'TestCategory',
- 'test_recommended_category': 'TestCategory - recommended'
- },
- }
- self.writer = adml_writer.GetWriter(config)
- self.writer.messages = {
- 'win_supported_winxpsp2': {
- 'text': 'Supported on Test OS or higher',
- 'desc': 'blah'
- },
- 'doc_recommended': {
- 'text': 'Recommended',
- 'desc': 'bleh'
- },
- }
- self.writer.Init()
-
- def _InitWriterForAddingPolicyGroups(self, writer):
- '''Initialize the writer for adding policy groups. This method must be
- called before the method "BeginPolicyGroup" can be called. It initializes
- attributes of the writer.
- '''
- writer.BeginTemplate()
-
- def _InitWriterForAddingPolicies(self, writer, policy):
- '''Initialize the writer for adding policies. This method must be
- called before the method "WritePolicy" can be called. It initializes
- attributes of the writer.
- '''
- self._InitWriterForAddingPolicyGroups(writer)
- policy_group = {
- 'name': 'PolicyGroup',
- 'caption': 'Test Caption',
- 'desc': 'This is the test description of the test policy group.',
- 'policies': policy,
- }
- writer.BeginPolicyGroup(policy_group)
-
- string_elements = \
- self.writer._string_table_elem.getElementsByTagName('string')
- for elem in string_elements:
- self.writer._string_table_elem.removeChild(elem)
-
- def testEmpty(self):
- self.writer.BeginTemplate()
- self.writer.EndTemplate()
- output = self.writer.GetTemplateText()
- expected_output = (
- '<?xml version="1.0" ?><policyDefinitionResources'
- ' revision="1.0" schemaVersion="1.0"><displayName/><description/>'
- '<resources><stringTable><string id="SUPPORTED_TESTOS">Supported on'
- ' Test OS or higher</string><string id="test_category">TestCategory'
- '</string><string id="test_recommended_category">'
- 'TestCategory - recommended</string></stringTable><presentationTable/>'
- '</resources></policyDefinitionResources>')
- self.AssertXMLEquals(output, expected_output)
-
- def testVersionAnnotation(self):
- self.writer.config['version'] = '39.0.0.0'
- self.writer.BeginTemplate()
- self.writer.EndTemplate()
- output = self.writer.GetTemplateText()
- expected_output = (
- '<?xml version="1.0" ?><policyDefinitionResources'
- ' revision="1.0" schemaVersion="1.0"><!--test version: 39.0.0.0-->'
- '<displayName/><description/><resources><stringTable>'
- '<string id="SUPPORTED_TESTOS">Supported on'
- ' Test OS or higher</string><string id="test_category">TestCategory'
- '</string><string id="test_recommended_category">'
- 'TestCategory - recommended</string></stringTable><presentationTable/>'
- '</resources></policyDefinitionResources>')
- self.AssertXMLEquals(output, expected_output)
-
- def testPolicyGroup(self):
- empty_policy_group = {
- 'name': 'PolicyGroup',
- 'caption': 'Test Group Caption',
- 'desc': 'This is the test description of the test policy group.',
- 'policies': [
- {'name': 'PolicyStub2',
- 'type': 'main'},
- {'name': 'PolicyStub1',
- 'type': 'main'},
- ],
- }
- self._InitWriterForAddingPolicyGroups(self.writer)
- self.writer.BeginPolicyGroup(empty_policy_group)
- self.writer.EndPolicyGroup
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="SUPPORTED_TESTOS">'
- 'Supported on Test OS or higher</string>\n'
- '<string id="test_category">TestCategory</string>\n'
- '<string id="test_recommended_category">'
- 'TestCategory - recommended</string>\n'
- '<string id="PolicyGroup_group">Test Group Caption</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = ''
- self.AssertXMLEquals(output, expected_output)
-
- def testMainPolicy(self):
- main_policy = {
- 'name': 'DummyMainPolicy',
- 'type': 'main',
- 'caption': 'Main policy caption',
- 'desc': 'Main policy test description.'
- }
- self. _InitWriterForAddingPolicies(self.writer, main_policy)
- self.writer.WritePolicy(main_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="DummyMainPolicy">Main policy caption</string>\n'
- '<string id="DummyMainPolicy_Explain">'
- 'Main policy test description.</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = '<presentation id="DummyMainPolicy"/>'
- self.AssertXMLEquals(output, expected_output)
-
- def testStringPolicy(self):
- string_policy = {
- 'name': 'StringPolicyStub',
- 'type': 'string',
- 'caption': 'String policy caption',
- 'label': 'String policy label',
- 'desc': 'This is a test description.',
- }
- self. _InitWriterForAddingPolicies(self.writer, string_policy)
- self.writer.WritePolicy(string_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="StringPolicyStub">String policy caption</string>\n'
- '<string id="StringPolicyStub_Explain">'
- 'This is a test description.</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="StringPolicyStub">\n'
- ' <textBox refId="StringPolicyStub">\n'
- ' <label>String policy label</label>\n'
- ' </textBox>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testIntPolicy(self):
- int_policy = {
- 'name': 'IntPolicyStub',
- 'type': 'int',
- 'caption': 'Int policy caption',
- 'label': 'Int policy label',
- 'desc': 'This is a test description.',
- }
- self. _InitWriterForAddingPolicies(self.writer, int_policy)
- self.writer.WritePolicy(int_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="IntPolicyStub">Int policy caption</string>\n'
- '<string id="IntPolicyStub_Explain">'
- 'This is a test description.</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="IntPolicyStub">\n'
- ' <decimalTextBox refId="IntPolicyStub">'
- 'Int policy label:</decimalTextBox>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testIntEnumPolicy(self):
- enum_policy = {
- 'name': 'EnumPolicyStub',
- 'type': 'int-enum',
- 'caption': 'Enum policy caption',
- 'label': 'Enum policy label',
- 'desc': 'This is a test description.',
- 'items': [
- {
- 'name': 'item 1',
- 'value': 1,
- 'caption': 'Caption Item 1',
- },
- {
- 'name': 'item 2',
- 'value': 2,
- 'caption': 'Caption Item 2',
- },
- ],
- }
- self. _InitWriterForAddingPolicies(self.writer, enum_policy)
- self.writer.WritePolicy(enum_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="EnumPolicyStub">Enum policy caption</string>\n'
- '<string id="EnumPolicyStub_Explain">'
- 'This is a test description.</string>\n'
- '<string id="item 1">Caption Item 1</string>\n'
- '<string id="item 2">Caption Item 2</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="EnumPolicyStub">\n'
- ' <dropdownList refId="EnumPolicyStub">'
- 'Enum policy label</dropdownList>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testStringEnumPolicy(self):
- enum_policy = {
- 'name': 'EnumPolicyStub',
- 'type': 'string-enum',
- 'caption': 'Enum policy caption',
- 'label': 'Enum policy label',
- 'desc': 'This is a test description.',
- 'items': [
- {
- 'name': 'item 1',
- 'value': 'value 1',
- 'caption': 'Caption Item 1',
- },
- {
- 'name': 'item 2',
- 'value': 'value 2',
- 'caption': 'Caption Item 2',
- },
- ],
- }
- self. _InitWriterForAddingPolicies(self.writer, enum_policy)
- self.writer.WritePolicy(enum_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="EnumPolicyStub">Enum policy caption</string>\n'
- '<string id="EnumPolicyStub_Explain">'
- 'This is a test description.</string>\n'
- '<string id="item 1">Caption Item 1</string>\n'
- '<string id="item 2">Caption Item 2</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="EnumPolicyStub">\n'
- ' <dropdownList refId="EnumPolicyStub">'
- 'Enum policy label</dropdownList>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testListPolicy(self):
- list_policy = {
- 'name': 'ListPolicyStub',
- 'type': 'list',
- 'caption': 'List policy caption',
- 'label': 'List policy label',
- 'desc': 'This is a test description.',
- }
- self. _InitWriterForAddingPolicies(self.writer, list_policy)
- self.writer.WritePolicy(list_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="ListPolicyStub">List policy caption</string>\n'
- '<string id="ListPolicyStub_Explain">'
- 'This is a test description.</string>\n'
- '<string id="ListPolicyStubDesc">List policy caption</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="ListPolicyStub">\n'
- ' <listBox refId="ListPolicyStubDesc">List policy label</listBox>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testStringEnumListPolicy(self):
- list_policy = {
- 'name': 'ListPolicyStub',
- 'type': 'string-enum-list',
- 'caption': 'List policy caption',
- 'label': 'List policy label',
- 'desc': 'This is a test description.',
- 'items': [
- {
- 'name': 'item 1',
- 'value': 'value 1',
- 'caption': 'Caption Item 1',
- },
- {
- 'name': 'item 2',
- 'value': 'value 2',
- 'caption': 'Caption Item 2',
- },
- ],
- }
- self. _InitWriterForAddingPolicies(self.writer, list_policy)
- self.writer.WritePolicy(list_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="ListPolicyStub">List policy caption</string>\n'
- '<string id="ListPolicyStub_Explain">'
- 'This is a test description.</string>\n'
- '<string id="ListPolicyStubDesc">List policy caption</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="ListPolicyStub">\n'
- ' <listBox refId="ListPolicyStubDesc">List policy label</listBox>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testDictionaryPolicy(self):
- dict_policy = {
- 'name': 'DictionaryPolicyStub',
- 'type': 'dict',
- 'caption': 'Dictionary policy caption',
- 'label': 'Dictionary policy label',
- 'desc': 'This is a test description.',
- }
- self. _InitWriterForAddingPolicies(self.writer, dict_policy)
- self.writer.WritePolicy(dict_policy)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="DictionaryPolicyStub">Dictionary policy caption</string>\n'
- '<string id="DictionaryPolicyStub_Explain">'
- 'This is a test description.</string>')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="DictionaryPolicyStub">\n'
- ' <textBox refId="DictionaryPolicyStub">\n'
- ' <label>Dictionary policy label</label>\n'
- ' </textBox>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
- def testPlatform(self):
- # Test that the writer correctly chooses policies of platform Windows.
- self.assertTrue(self.writer.IsPolicySupported({
- 'supported_on': [
- {'platforms': ['win', 'zzz']}, {'platforms': ['aaa']}
- ]
- }))
- self.assertFalse(self.writer.IsPolicySupported({
- 'supported_on': [
- {'platforms': ['mac', 'linux']}, {'platforms': ['aaa']}
- ]
- }))
-
- def testStringEncodings(self):
- enum_policy_a = {
- 'name': 'EnumPolicy.A',
- 'type': 'string-enum',
- 'caption': 'Enum policy A caption',
- 'label': 'Enum policy A label',
- 'desc': 'This is a test description.',
- 'items': [
- {
- 'name': 'tls1.2',
- 'value': 'tls1.2',
- 'caption': 'tls1.2',
- }
- ],
- }
- enum_policy_b = {
- 'name': 'EnumPolicy.B',
- 'type': 'string-enum',
- 'caption': 'Enum policy B caption',
- 'label': 'Enum policy B label',
- 'desc': 'This is a test description.',
- 'items': [
- {
- 'name': 'tls1.2',
- 'value': 'tls1.2',
- 'caption': 'tls1.2',
- }
- ],
- }
- self. _InitWriterForAddingPolicies(self.writer, enum_policy_a)
- self.writer.WritePolicy(enum_policy_a)
- self.writer.WritePolicy(enum_policy_b)
- # Assert generated string elements.
- output = self.GetXMLOfChildren(self.writer._string_table_elem)
- expected_output = (
- '<string id="EnumPolicy_A">Enum policy A caption</string>\n'
- '<string id="EnumPolicy_A_Explain">'
- 'This is a test description.</string>\n'
- '<string id="tls1_2">tls1.2</string>\n'
- '<string id="EnumPolicy_B">Enum policy B caption</string>\n'
- '<string id="EnumPolicy_B_Explain">'
- 'This is a test description.</string>\n')
- self.AssertXMLEquals(output, expected_output)
- # Assert generated presentation elements.
- output = self.GetXMLOfChildren(self.writer._presentation_table_elem)
- expected_output = (
- '<presentation id="EnumPolicy.A">\n'
- ' <dropdownList refId="EnumPolicy.A">'
- 'Enum policy A label</dropdownList>\n'
- '</presentation>\n'
- '<presentation id="EnumPolicy.B">\n'
- ' <dropdownList refId="EnumPolicy.B">'
- 'Enum policy B label</dropdownList>\n'
- '</presentation>')
- self.AssertXMLEquals(output, expected_output)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/admx_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/admx_writer.py
deleted file mode 100755
index 0864eee9c23..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/admx_writer.py
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from xml.dom import minidom
-from grit.format.policy_templates.writers import xml_formatted_writer
-
-
-def GetWriter(config):
- '''Factory method for instanciating the ADMXWriter. Every Writer needs a
- GetWriter method because the TemplateFormatter uses this method to
- instantiate a Writer.
- '''
- return ADMXWriter(['win'], config)
-
-
-class ADMXWriter(xml_formatted_writer.XMLFormattedWriter):
- '''Class for generating an ADMX policy template. It is used by the
- PolicyTemplateGenerator to write the admx file.
- '''
-
- # DOM root node of the generated ADMX document.
- _doc = None
-
- # The ADMX "policies" element that contains the ADMX "policy" elements that
- # are generated.
- _active_policies_elem = None
-
- def _AdmlString(self, name):
- '''Creates a reference to the named string in an ADML file.
- Args:
- name: Name of the referenced ADML string.
- '''
- name = name.replace('.', '_')
- return '$(string.' + name + ')'
-
- def _AdmlStringExplain(self, name):
- '''Creates a reference to the named explanation string in an ADML file.
- Args:
- name: Name of the referenced ADML explanation.
- '''
- name = name.replace('.', '_')
- return '$(string.' + name + '_Explain)'
-
- def _AdmlPresentation(self, name):
- '''Creates a reference to the named presentation element in an ADML file.
- Args:
- name: Name of the referenced ADML presentation element.
- '''
- return '$(presentation.' + name + ')'
-
- def _AddPolicyNamespaces(self, parent, prefix, namespace):
- '''Generates the ADMX "policyNamespace" element and adds the elements to the
- passed parent element. The namespace of the generated ADMX document is
- define via the ADMX "target" element. Used namespaces are declared with an
- ADMX "using" element. ADMX "target" and "using" elements are children of the
- ADMX "policyNamespace" element.
-
- Args:
- parent: The parent node to which all generated elements are added.
- prefix: A logical name that can be used in the generated ADMX document to
- refere to this namespace.
- namespace: Namespace of the generated ADMX document.
- '''
- policy_namespaces_elem = self.AddElement(parent, 'policyNamespaces')
- attributes = {
- 'prefix': prefix,
- 'namespace': namespace,
- }
- self.AddElement(policy_namespaces_elem, 'target', attributes)
- if 'admx_using_namespaces' in self.config:
- prefix_namespace_map = self.config['admx_using_namespaces']
- for prefix in prefix_namespace_map:
- attributes = {
- 'prefix': prefix,
- 'namespace': prefix_namespace_map[prefix],
- }
- self.AddElement(policy_namespaces_elem, 'using', attributes)
- attributes = {
- 'prefix': 'windows',
- 'namespace': 'Microsoft.Policies.Windows',
- }
- self.AddElement(policy_namespaces_elem, 'using', attributes)
-
- def _AddCategory(self, parent, name, display_name,
- parent_category_name=None):
- '''Adds an ADMX category element to the passed parent node. The following
- snippet shows an example of a category element where "chromium" is the value
- of the parameter name:
-
- <category displayName="$(string.chromium)" name="chromium"/>
-
- Each parent node can have only one category with a given name. Adding the
- same category again with the same attributes is ignored, but adding it
- again with different attributes is an error.
-
- Args:
- parent: The parent node to which all generated elements are added.
- name: Name of the category.
- display_name: Display name of the category.
- parent_category_name: Name of the parent category. Defaults to None.
- '''
- existing = filter(lambda e: e.getAttribute('name') == name,
- parent.getElementsByTagName('category'))
- if existing:
- assert len(existing) == 1
- assert existing[0].getAttribute('name') == name
- assert existing[0].getAttribute('displayName') == display_name
- return
- attributes = {
- 'name': name,
- 'displayName': display_name,
- }
- category_elem = self.AddElement(parent, 'category', attributes)
- if parent_category_name:
- attributes = {'ref': parent_category_name}
- self.AddElement(category_elem, 'parentCategory', attributes)
-
- def _AddCategories(self, categories):
- '''Generates the ADMX "categories" element and adds it to the categories
- main node. The "categories" element defines the category for the policies
- defined in this ADMX document. Here is an example of an ADMX "categories"
- element:
-
- <categories>
- <category displayName="$(string.googlechrome)" name="googlechrome">
- <parentCategory ref="Google:Cat_Google"/>
- </category>
- </categories>
-
- Args:
- categories_path: The categories path e.g. ['google', 'googlechrome']. For
- each level in the path a "category" element will be generated, unless
- the level contains a ':', in which case it is treated as external
- references and no element is generated. Except for the root level, each
- level refers to its parent. Since the root level category has no parent
- it does not require a parent reference.
- '''
- category_name = None
- for category in categories:
- parent_category_name = category_name
- category_name = category
- if (":" not in category_name):
- self._AddCategory(self._categories_elem, category_name,
- self._AdmlString(category_name), parent_category_name)
-
- def _AddSupportedOn(self, parent, supported_os):
- '''Generates the "supportedOn" ADMX element and adds it to the passed
- parent node. The "supportedOn" element contains information about supported
- Windows OS versions. The following code snippet contains an example of a
- "supportedOn" element:
-
- <supportedOn>
- <definitions>
- <definition name="SUPPORTED_WINXPSP2"
- displayName="$(string.SUPPORTED_WINXPSP2)"/>
- </definitions>
- ...
- </supportedOn>
-
- Args:
- parent: The parent element to which all generated elements are added.
- supported_os: List with all supported Win OSes.
- '''
- supported_on_elem = self.AddElement(parent, 'supportedOn')
- definitions_elem = self.AddElement(supported_on_elem, 'definitions')
- attributes = {
- 'name': supported_os,
- 'displayName': self._AdmlString(supported_os)
- }
- self.AddElement(definitions_elem, 'definition', attributes)
-
- def _AddStringPolicy(self, parent, name):
- '''Generates ADMX elements for a String-Policy and adds them to the
- passed parent node.
- '''
- attributes = {
- 'id': name,
- 'valueName': name,
- 'maxLength': '1000000',
- }
- self.AddElement(parent, 'text', attributes)
-
- def _AddIntPolicy(self, parent, name):
- '''Generates ADMX elements for an Int-Policy and adds them to the passed
- parent node.
- '''
- attributes = {
- 'id': name,
- 'valueName': name,
- 'maxValue': '2000000000',
- }
- self.AddElement(parent, 'decimal', attributes)
-
- def _AddEnumPolicy(self, parent, policy):
- '''Generates ADMX elements for an Enum-Policy and adds them to the
- passed parent element.
- '''
- name = policy['name']
- items = policy['items']
- attributes = {
- 'id': name,
- 'valueName': name,
- }
- enum_elem = self.AddElement(parent, 'enum', attributes)
- for item in items:
- attributes = {'displayName': self._AdmlString(item['name'])}
- item_elem = self.AddElement(enum_elem, 'item', attributes)
- value_elem = self.AddElement(item_elem, 'value')
- value_string = str(item['value'])
- if policy['type'] == 'int-enum':
- self.AddElement(value_elem, 'decimal', {'value': value_string})
- else:
- self.AddElement(value_elem, 'string', {}, value_string)
-
- def _AddListPolicy(self, parent, key, name):
- '''Generates ADMX XML elements for a List-Policy and adds them to the
- passed parent element.
- '''
- attributes = {
- # The ID must be in sync with ID of the corresponding element in the ADML
- # file.
- 'id': name + 'Desc',
- 'valuePrefix': '',
- 'key': key + '\\' + name,
- }
- self.AddElement(parent, 'list', attributes)
-
- def _AddMainPolicy(self, parent):
- '''Generates ADMX elements for a Main-Policy amd adds them to the
- passed parent element.
- '''
- enabled_value_elem = self.AddElement(parent, 'enabledValue');
- self.AddElement(enabled_value_elem, 'decimal', {'value': '1'})
- disabled_value_elem = self.AddElement(parent, 'disabledValue');
- self.AddElement(disabled_value_elem, 'decimal', {'value': '0'})
-
- def _GetElements(self, policy_group_elem):
- '''Returns the ADMX "elements" child from an ADMX "policy" element. If the
- "policy" element has no "elements" child yet, a new child is created.
-
- Args:
- policy_group_elem: The ADMX "policy" element from which the child element
- "elements" is returned.
-
- Raises:
- Exception: The policy_group_elem does not contain a ADMX "policy" element.
- '''
- if policy_group_elem.tagName != 'policy':
- raise Exception('Expected a "policy" element but got a "%s" element'
- % policy_group_elem.tagName)
- elements_list = policy_group_elem.getElementsByTagName('elements');
- if len(elements_list) == 0:
- return self.AddElement(policy_group_elem, 'elements')
- elif len(elements_list) == 1:
- return elements_list[0]
- else:
- raise Exception('There is supposed to be only one "elements" node but'
- ' there are %s.' % str(len(elements_list)))
-
- def _WritePolicy(self, policy, name, key, parent):
- '''Generates AMDX elements for a Policy. There are four different policy
- types: Main-Policy, String-Policy, Enum-Policy and List-Policy.
- '''
- policies_elem = self._active_policies_elem
- policy_type = policy['type']
- policy_name = policy['name']
- if policy_type == 'external':
- # This type can only be set through cloud policy.
- return
-
- attributes = {
- 'name': name,
- 'class': self.config['win_group_policy_class'],
- 'displayName': self._AdmlString(policy_name),
- 'explainText': self._AdmlStringExplain(policy_name),
- 'presentation': self._AdmlPresentation(policy_name),
- 'key': key,
- }
- # Store the current "policy" AMDX element in self for later use by the
- # WritePolicy method.
- policy_elem = self.AddElement(policies_elem, 'policy',
- attributes)
- self.AddElement(policy_elem, 'parentCategory',
- {'ref': parent})
- self.AddElement(policy_elem, 'supportedOn',
- {'ref': self.config['win_supported_os']})
- if policy_type == 'main':
- self.AddAttribute(policy_elem, 'valueName', policy_name)
- self._AddMainPolicy(policy_elem)
- elif policy_type in ('string', 'dict'):
- # 'dict' policies are configured as JSON-encoded strings on Windows.
- parent = self._GetElements(policy_elem)
- self._AddStringPolicy(parent, policy_name)
- elif policy_type == 'int':
- parent = self._GetElements(policy_elem)
- self._AddIntPolicy(parent, policy_name)
- elif policy_type in ('int-enum', 'string-enum'):
- parent = self._GetElements(policy_elem)
- self._AddEnumPolicy(parent, policy)
- elif policy_type in ('list', 'string-enum-list'):
- parent = self._GetElements(policy_elem)
- self._AddListPolicy(parent, key, policy_name)
- elif policy_type == 'group':
- pass
- else:
- raise Exception('Unknown policy type %s.' % policy_type)
-
- def WritePolicy(self, policy):
- if self.CanBeMandatory(policy):
- self._WritePolicy(policy,
- policy['name'],
- self.config['win_reg_mandatory_key_name'],
- self._active_mandatory_policy_group_name)
-
- def WriteRecommendedPolicy(self, policy):
- self._WritePolicy(policy,
- policy['name'] + '_recommended',
- self.config['win_reg_recommended_key_name'],
- self._active_recommended_policy_group_name)
-
- def _BeginPolicyGroup(self, group, name, parent):
- '''Generates ADMX elements for a Policy-Group.
- '''
- attributes = {
- 'name': name,
- 'displayName': self._AdmlString(group['name'] + '_group'),
- }
- category_elem = self.AddElement(self._categories_elem,
- 'category',
- attributes)
- attributes = {
- 'ref': parent
- }
- self.AddElement(category_elem, 'parentCategory', attributes)
-
- def BeginPolicyGroup(self, group):
- self._BeginPolicyGroup(group,
- group['name'],
- self.config['win_mandatory_category_path'][-1])
- self._active_mandatory_policy_group_name = group['name']
-
- def EndPolicyGroup(self):
- self._active_mandatory_policy_group_name = \
- self.config['win_mandatory_category_path'][-1]
-
- def BeginRecommendedPolicyGroup(self, group):
- self._BeginPolicyGroup(group,
- group['name'] + '_recommended',
- self.config['win_recommended_category_path'][-1])
- self._active_recommended_policy_group_name = group['name'] + '_recommended'
-
- def EndRecommendedPolicyGroup(self):
- self._active_recommended_policy_group_name = \
- self.config['win_recommended_category_path'][-1]
-
- def BeginTemplate(self):
- '''Generates the skeleton of the ADMX template. An ADMX template contains
- an ADMX "PolicyDefinitions" element with four child nodes: "policies"
- "policyNamspaces", "resources", "supportedOn" and "categories"
- '''
- dom_impl = minidom.getDOMImplementation('')
- self._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
- if self._GetChromiumVersionString() is not None:
- self.AddComment(self._doc.documentElement, self.config['build'] + \
- ' version: ' + self._GetChromiumVersionString())
- policy_definitions_elem = self._doc.documentElement
-
- policy_definitions_elem.attributes['revision'] = '1.0'
- policy_definitions_elem.attributes['schemaVersion'] = '1.0'
-
- self._AddPolicyNamespaces(policy_definitions_elem,
- self.config['admx_prefix'],
- self.config['admx_namespace'])
- self.AddElement(policy_definitions_elem, 'resources',
- {'minRequiredRevision' : '1.0'})
- self._AddSupportedOn(policy_definitions_elem,
- self.config['win_supported_os'])
- self._categories_elem = self.AddElement(policy_definitions_elem,
- 'categories')
- self._AddCategories(self.config['win_mandatory_category_path'])
- self._AddCategories(self.config['win_recommended_category_path'])
- self._active_policies_elem = self.AddElement(policy_definitions_elem,
- 'policies')
- self._active_mandatory_policy_group_name = \
- self.config['win_mandatory_category_path'][-1]
- self._active_recommended_policy_group_name = \
- self.config['win_recommended_category_path'][-1]
-
- def GetTemplateText(self):
- return self.ToPrettyXml(self._doc)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/admx_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/admx_writer_unittest.py
deleted file mode 100755
index 2a642939fca..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/admx_writer_unittest.py
+++ /dev/null
@@ -1,577 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-"""Unittests for grit.format.policy_templates.writers.admx_writer."""
-
-
-import os
-import sys
-import unittest
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-
-from grit.format.policy_templates.writers import admx_writer
-from grit.format.policy_templates.writers import xml_writer_base_unittest
-from xml.dom import minidom
-
-
-class AdmxWriterUnittest(xml_writer_base_unittest.XmlWriterBaseTest):
-
- def _CreateDocumentElement(self):
- dom_impl = minidom.getDOMImplementation('')
- doc = dom_impl.createDocument(None, 'root', None)
- return doc.documentElement
-
- def setUp(self):
- # Writer configuration. This dictionary contains parameter used by the ADMX
- # Writer
- config = {
- 'win_group_policy_class': 'TestClass',
- 'win_supported_os': 'SUPPORTED_TESTOS',
- 'win_reg_mandatory_key_name': 'Software\\Policies\\Test',
- 'win_reg_recommended_key_name': 'Software\\Policies\\Test\\Recommended',
- 'win_mandatory_category_path': ['test_category'],
- 'win_recommended_category_path': ['test_recommended_category'],
- 'win_category_path_strings': {
- 'test_category': 'TestCategory',
- 'test_recommended_category': 'TestCategory - recommended'
- },
- 'admx_namespace': 'ADMXWriter.Test.Namespace',
- 'admx_prefix': 'test_prefix',
- 'build': 'test_product',
- }
- self.writer = admx_writer.GetWriter(config)
- self.writer.Init()
-
- def _GetPoliciesElement(self, doc):
- node_list = doc.getElementsByTagName('policies')
- self.assertTrue(node_list.length == 1)
- return node_list.item(0)
-
- def _GetCategoriesElement(self, doc):
- node_list = doc.getElementsByTagName('categories')
- self.assertTrue(node_list.length == 1)
- return node_list.item(0)
-
- def testEmpty(self):
- self.writer.BeginTemplate()
- self.writer.EndTemplate()
-
- output = self.writer.GetTemplateText()
- expected_output = (
- '<?xml version="1.0" ?>\n'
- '<policyDefinitions revision="1.0" schemaVersion="1.0">\n'
- ' <policyNamespaces>\n'
- ' <target namespace="ADMXWriter.Test.Namespace"'
- ' prefix="test_prefix"/>\n'
- ' <using namespace="Microsoft.Policies.Windows" prefix="windows"/>\n'
- ' </policyNamespaces>\n'
- ' <resources minRequiredRevision="1.0"/>\n'
- ' <supportedOn>\n'
- ' <definitions>\n'
- ' <definition displayName="'
- '$(string.SUPPORTED_TESTOS)" name="SUPPORTED_TESTOS"/>\n'
- ' </definitions>\n'
- ' </supportedOn>\n'
- ' <categories>\n'
- ' <category displayName="$(string.test_category)"'
- ' name="test_category"/>\n'
- ' <category displayName="$(string.test_recommended_category)"'
- ' name="test_recommended_category"/>\n'
- ' </categories>\n'
- ' <policies/>\n'
- '</policyDefinitions>')
- self.AssertXMLEquals(output, expected_output)
-
- def testEmptyVersion(self):
- self.writer.config['version'] = '39.0.0.0'
- self.writer.BeginTemplate()
- self.writer.EndTemplate()
-
- output = self.writer.GetTemplateText()
- expected_output = (
- '<?xml version="1.0" ?>\n'
- '<policyDefinitions revision="1.0" schemaVersion="1.0">\n'
- ' <!--test_product version: 39.0.0.0-->\n'
- ' <policyNamespaces>\n'
- ' <target namespace="ADMXWriter.Test.Namespace"'
- ' prefix="test_prefix"/>\n'
- ' <using namespace="Microsoft.Policies.Windows" prefix="windows"/>\n'
- ' </policyNamespaces>\n'
- ' <resources minRequiredRevision="1.0"/>\n'
- ' <supportedOn>\n'
- ' <definitions>\n'
- ' <definition displayName="'
- '$(string.SUPPORTED_TESTOS)" name="SUPPORTED_TESTOS"/>\n'
- ' </definitions>\n'
- ' </supportedOn>\n'
- ' <categories>\n'
- ' <category displayName="$(string.test_category)"'
- ' name="test_category"/>\n'
- ' <category displayName="$(string.test_recommended_category)"'
- ' name="test_recommended_category"/>\n'
- ' </categories>\n'
- ' <policies/>\n'
- '</policyDefinitions>')
- self.AssertXMLEquals(output, expected_output)
-
- def testEmptyPolicyGroup(self):
- empty_policy_group = {
- 'name': 'PolicyGroup',
- 'policies': []
- }
- # Initialize writer to write a policy group.
- self.writer.BeginTemplate()
- # Write policy group
- self.writer.BeginPolicyGroup(empty_policy_group)
- self.writer.EndPolicyGroup()
-
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = ''
- self.AssertXMLEquals(output, expected_output)
-
- output = self.GetXMLOfChildren(
- self._GetCategoriesElement(self.writer._doc))
- expected_output = (
- '<category displayName="$(string.test_category)"'
- ' name="test_category"/>\n'
- '<category displayName="$(string.test_recommended_category)"'
- ' name="test_recommended_category"/>\n'
- '<category displayName="$(string.PolicyGroup_group)"'
- ' name="PolicyGroup">\n'
- ' <parentCategory ref="test_category"/>\n'
- '</category>')
-
- self.AssertXMLEquals(output, expected_output)
-
- def testPolicyGroup(self):
- empty_policy_group = {
- 'name': 'PolicyGroup',
- 'policies': [
- {'name': 'PolicyStub2',
- 'type': 'main'},
- {'name': 'PolicyStub1',
- 'type': 'main'},
- ]
- }
- # Initialize writer to write a policy group.
- self.writer.BeginTemplate()
- # Write policy group
- self.writer.BeginPolicyGroup(empty_policy_group)
- self.writer.EndPolicyGroup()
-
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = ''
- self.AssertXMLEquals(output, expected_output)
-
- output = self.GetXMLOfChildren(
- self._GetCategoriesElement(self.writer._doc))
- expected_output = (
- '<category displayName="$(string.test_category)"'
- ' name="test_category"/>\n'
- '<category displayName="$(string.test_recommended_category)"'
- ' name="test_recommended_category"/>\n'
- '<category displayName="$(string.PolicyGroup_group)"'
- ' name="PolicyGroup">\n'
- ' <parentCategory ref="test_category"/>\n'
- '</category>')
- self.AssertXMLEquals(output, expected_output)
-
-
- def _initWriterForPolicy(self, writer, policy):
- '''Initializes the writer to write the given policy next.
- '''
- policy_group = {
- 'name': 'PolicyGroup',
- 'policies': [policy]
- }
- writer.BeginTemplate()
- writer.BeginPolicyGroup(policy_group)
-
- def testMainPolicy(self):
- main_policy = {
- 'name': 'DummyMainPolicy',
- 'type': 'main',
- }
-
- self._initWriterForPolicy(self.writer, main_policy)
-
- self.writer.WritePolicy(main_policy)
-
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.DummyMainPolicy)"'
- ' explainText="$(string.DummyMainPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="DummyMainPolicy"'
- ' presentation="$(presentation.DummyMainPolicy)"'
- ' valueName="DummyMainPolicy">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <enabledValue>\n'
- ' <decimal value="1"/>\n'
- ' </enabledValue>\n'
- ' <disabledValue>\n'
- ' <decimal value="0"/>\n'
- ' </disabledValue>\n'
- '</policy>')
-
- self.AssertXMLEquals(output, expected_output)
-
- def testRecommendedPolicy(self):
- main_policy = {
- 'name': 'DummyMainPolicy',
- 'type': 'main',
- }
-
- policy_group = {
- 'name': 'PolicyGroup',
- 'policies': [main_policy],
- }
- self.writer.BeginTemplate()
- self.writer.BeginRecommendedPolicyGroup(policy_group)
-
- self.writer.WriteRecommendedPolicy(main_policy)
-
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.DummyMainPolicy)"'
- ' explainText="$(string.DummyMainPolicy_Explain)"'
- ' key="Software\\Policies\\Test\\Recommended"'
- ' name="DummyMainPolicy_recommended"'
- ' presentation="$(presentation.DummyMainPolicy)"'
- ' valueName="DummyMainPolicy">\n'
- ' <parentCategory ref="PolicyGroup_recommended"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <enabledValue>\n'
- ' <decimal value="1"/>\n'
- ' </enabledValue>\n'
- ' <disabledValue>\n'
- ' <decimal value="0"/>\n'
- ' </disabledValue>\n'
- '</policy>')
-
- self.AssertXMLEquals(output, expected_output)
-
- def testRecommendedOnlyPolicy(self):
- main_policy = {
- 'name': 'DummyMainPolicy',
- 'type': 'main',
- 'features': {
- 'can_be_recommended': True,
- 'can_be_mandatory': False,
- }
- }
-
- policy_group = {
- 'name': 'PolicyGroup',
- 'policies': [main_policy],
- }
- self.writer.BeginTemplate()
- self.writer.BeginRecommendedPolicyGroup(policy_group)
-
- self.writer.WritePolicy(main_policy)
- self.writer.WriteRecommendedPolicy(main_policy)
-
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.DummyMainPolicy)"'
- ' explainText="$(string.DummyMainPolicy_Explain)"'
- ' key="Software\\Policies\\Test\\Recommended"'
- ' name="DummyMainPolicy_recommended"'
- ' presentation="$(presentation.DummyMainPolicy)"'
- ' valueName="DummyMainPolicy">\n'
- ' <parentCategory ref="PolicyGroup_recommended"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <enabledValue>\n'
- ' <decimal value="1"/>\n'
- ' </enabledValue>\n'
- ' <disabledValue>\n'
- ' <decimal value="0"/>\n'
- ' </disabledValue>\n'
- '</policy>')
-
- self.AssertXMLEquals(output, expected_output)
-
- def testStringPolicy(self):
- string_policy = {
- 'name': 'SampleStringPolicy',
- 'type': 'string',
- }
- self._initWriterForPolicy(self.writer, string_policy)
-
- self.writer.WritePolicy(string_policy)
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.SampleStringPolicy)"'
- ' explainText="$(string.SampleStringPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleStringPolicy"'
- ' presentation="$(presentation.SampleStringPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <text id="SampleStringPolicy" maxLength="1000000"'
- ' valueName="SampleStringPolicy"/>\n'
- ' </elements>\n'
- '</policy>')
- self.AssertXMLEquals(output, expected_output)
-
- def testIntPolicy(self):
- int_policy = {
- 'name': 'SampleIntPolicy',
- 'type': 'int',
- }
- self._initWriterForPolicy(self.writer, int_policy)
-
- self.writer.WritePolicy(int_policy)
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.SampleIntPolicy)"'
- ' explainText="$(string.SampleIntPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleIntPolicy"'
- ' presentation="$(presentation.SampleIntPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <decimal id="SampleIntPolicy" maxValue="2000000000" '
- 'valueName="SampleIntPolicy"/>\n'
- ' </elements>\n'
- '</policy>')
- self.AssertXMLEquals(output, expected_output)
-
- def testIntEnumPolicy(self):
- enum_policy = {
- 'name': 'SampleEnumPolicy',
- 'type': 'int-enum',
- 'items': [
- {'name': 'item_1', 'value': 0},
- {'name': 'item_2', 'value': 1},
- ]
- }
-
- self._initWriterForPolicy(self.writer, enum_policy)
- self.writer.WritePolicy(enum_policy)
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.SampleEnumPolicy)"'
- ' explainText="$(string.SampleEnumPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleEnumPolicy"'
- ' presentation="$(presentation.SampleEnumPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <enum id="SampleEnumPolicy" valueName="SampleEnumPolicy">\n'
- ' <item displayName="$(string.item_1)">\n'
- ' <value>\n'
- ' <decimal value="0"/>\n'
- ' </value>\n'
- ' </item>\n'
- ' <item displayName="$(string.item_2)">\n'
- ' <value>\n'
- ' <decimal value="1"/>\n'
- ' </value>\n'
- ' </item>\n'
- ' </enum>\n'
- ' </elements>\n'
- '</policy>')
- self.AssertXMLEquals(output, expected_output)
-
- def testStringEnumPolicy(self):
- enum_policy = {
- 'name': 'SampleEnumPolicy',
- 'type': 'string-enum',
- 'items': [
- {'name': 'item_1', 'value': 'one'},
- {'name': 'item_2', 'value': 'two'},
- ]
- }
-
- # This test is different than the others because it also tests that space
- # usage inside <string> nodes is correct.
- dom_impl = minidom.getDOMImplementation('')
- self.writer._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
- self.writer._active_policies_elem = self.writer._doc.documentElement
- self.writer._active_mandatory_policy_group_name = 'PolicyGroup'
- self.writer.WritePolicy(enum_policy)
- output = self.writer.GetTemplateText()
- expected_output = (
- '<?xml version="1.0" ?>\n'
- '<policyDefinitions>\n'
- ' <policy class="TestClass" displayName="$(string.SampleEnumPolicy)"'
- ' explainText="$(string.SampleEnumPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleEnumPolicy"'
- ' presentation="$(presentation.SampleEnumPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <enum id="SampleEnumPolicy" valueName="SampleEnumPolicy">\n'
- ' <item displayName="$(string.item_1)">\n'
- ' <value>\n'
- ' <string>one</string>\n'
- ' </value>\n'
- ' </item>\n'
- ' <item displayName="$(string.item_2)">\n'
- ' <value>\n'
- ' <string>two</string>\n'
- ' </value>\n'
- ' </item>\n'
- ' </enum>\n'
- ' </elements>\n'
- ' </policy>\n'
- '</policyDefinitions>')
- self.AssertXMLEquals(output, expected_output)
-
- def testListPolicy(self):
- list_policy = {
- 'name': 'SampleListPolicy',
- 'type': 'list',
- }
- self._initWriterForPolicy(self.writer, list_policy)
- self.writer.WritePolicy(list_policy)
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.SampleListPolicy)"'
- ' explainText="$(string.SampleListPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleListPolicy"'
- ' presentation="$(presentation.SampleListPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <list id="SampleListPolicyDesc"'
- ' key="Software\Policies\Test\SampleListPolicy" valuePrefix=""/>\n'
- ' </elements>\n'
- '</policy>')
-
- self.AssertXMLEquals(output, expected_output)
-
- def testStringEnumListPolicy(self):
- list_policy = {
- 'name': 'SampleListPolicy',
- 'type': 'string-enum-list',
- 'items': [
- {'name': 'item_1', 'value': 'one'},
- {'name': 'item_2', 'value': 'two'},
- ]
- }
- self._initWriterForPolicy(self.writer, list_policy)
- self.writer.WritePolicy(list_policy)
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.SampleListPolicy)"'
- ' explainText="$(string.SampleListPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleListPolicy"'
- ' presentation="$(presentation.SampleListPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <list id="SampleListPolicyDesc"'
- ' key="Software\Policies\Test\SampleListPolicy" valuePrefix=""/>\n'
- ' </elements>\n'
- '</policy>')
-
- self.AssertXMLEquals(output, expected_output)
-
- def testDictionaryPolicy(self):
- dict_policy = {
- 'name': 'SampleDictionaryPolicy',
- 'type': 'dict',
- }
- self._initWriterForPolicy(self.writer, dict_policy)
-
- self.writer.WritePolicy(dict_policy)
- output = self.GetXMLOfChildren(self._GetPoliciesElement(self.writer._doc))
- expected_output = (
- '<policy class="TestClass" displayName="$(string.'
- 'SampleDictionaryPolicy)"'
- ' explainText="$(string.SampleDictionaryPolicy_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleDictionaryPolicy"'
- ' presentation="$(presentation.SampleDictionaryPolicy)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <text id="SampleDictionaryPolicy" maxLength="1000000"'
- ' valueName="SampleDictionaryPolicy"/>\n'
- ' </elements>\n'
- '</policy>')
- self.AssertXMLEquals(output, expected_output)
-
- def testPlatform(self):
- # Test that the writer correctly chooses policies of platform Windows.
- self.assertTrue(self.writer.IsPolicySupported({
- 'supported_on': [
- {'platforms': ['win', 'zzz']}, {'platforms': ['aaa']}
- ]
- }))
- self.assertFalse(self.writer.IsPolicySupported({
- 'supported_on': [
- {'platforms': ['mac', 'linux']}, {'platforms': ['aaa']}
- ]
- }))
-
- def testStringEncodings(self):
- enum_policy_a = {
- 'name': 'SampleEnumPolicy.A',
- 'type': 'string-enum',
- 'items': [
- {'name': 'tls1.2', 'value': 'tls1.2'}
- ]
- }
- enum_policy_b = {
- 'name': 'SampleEnumPolicy.B',
- 'type': 'string-enum',
- 'items': [
- {'name': 'tls1.2', 'value': 'tls1.2'}
- ]
- }
-
- dom_impl = minidom.getDOMImplementation('')
- self.writer._doc = dom_impl.createDocument(None, 'policyDefinitions', None)
- self.writer._active_policies_elem = self.writer._doc.documentElement
- self.writer._active_mandatory_policy_group_name = 'PolicyGroup'
- self.writer.WritePolicy(enum_policy_a)
- self.writer.WritePolicy(enum_policy_b)
- output = self.writer.GetTemplateText()
- expected_output = (
- '<?xml version="1.0" ?>\n'
- '<policyDefinitions>\n'
- ' <policy class="TestClass" displayName="$(string.SampleEnumPolicy_A)"'
- ' explainText="$(string.SampleEnumPolicy_A_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleEnumPolicy.A"'
- ' presentation="$(presentation.SampleEnumPolicy.A)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <enum id="SampleEnumPolicy.A" valueName="SampleEnumPolicy.A">\n'
- ' <item displayName="$(string.tls1_2)">\n'
- ' <value>\n'
- ' <string>tls1.2</string>\n'
- ' </value>\n'
- ' </item>\n'
- ' </enum>\n'
- ' </elements>\n'
- ' </policy>\n'
- ' <policy class="TestClass" displayName="$(string.SampleEnumPolicy_B)"'
- ' explainText="$(string.SampleEnumPolicy_B_Explain)"'
- ' key="Software\\Policies\\Test" name="SampleEnumPolicy.B"'
- ' presentation="$(presentation.SampleEnumPolicy.B)">\n'
- ' <parentCategory ref="PolicyGroup"/>\n'
- ' <supportedOn ref="SUPPORTED_TESTOS"/>\n'
- ' <elements>\n'
- ' <enum id="SampleEnumPolicy.B" valueName="SampleEnumPolicy.B">\n'
- ' <item displayName="$(string.tls1_2)">\n'
- ' <value>\n'
- ' <string>tls1.2</string>\n'
- ' </value>\n'
- ' </item>\n'
- ' </enum>\n'
- ' </elements>\n'
- ' </policy>\n'
- '</policyDefinitions>')
- self.AssertXMLEquals(output, expected_output)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer.py
deleted file mode 100755
index b5a686260c3..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from grit.format.policy_templates.writers import xml_formatted_writer
-from xml.dom import minidom
-from xml.sax import saxutils as xml_escape
-
-
-def GetWriter(config):
- '''Factory method for creating AndroidPolicyWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return AndroidPolicyWriter(['android'], config)
-
-
-def _EscapeResource(resource):
- '''Escape the resource for usage in an Android resource XML file.
- This includes standard XML escaping as well as those specific to Android.
- '''
- if type(resource) == int:
- return str(resource)
- return xml_escape.escape(resource, {"'": "\\'", '"': '\\"', '\\': '\\\\'})
-
-
-class AndroidPolicyWriter(xml_formatted_writer.XMLFormattedWriter):
- '''Outputs localized Android Resource XML files.
- The policy strings are localized and exposed as string resources for
- consumption through Android's App restriction Schema.
- '''
-
- # DOM root node of the generated XML document.
- _doc = None
- # The resources node contains all resource 'string' and 'string-array'
- # elements.
- _resources = None
-
- def AddStringResource(self, name, string):
- '''Add a string resource of the given name.
- '''
- string_node = self._doc.createElement('string')
- string_node.setAttribute('name', name)
- string_node.appendChild(self._doc.createTextNode(_EscapeResource(string)))
- self._resources.appendChild(string_node)
-
- def AddStringArrayResource(self, name, string_items):
- '''Add a string-array resource of the given name and
- elements from string_items.
- '''
- string_array_node = self._doc.createElement('string-array')
- string_array_node.setAttribute('name', name)
- self._resources.appendChild(string_array_node)
- for item in string_items:
- string_node = self._doc.createElement('item')
- string_node.appendChild(self._doc.createTextNode(_EscapeResource(item)))
- string_array_node.appendChild(string_node)
-
- def PreprocessPolicies(self, policy_list):
- return self.FlattenGroupsAndSortPolicies(policy_list)
-
- def CanBeRecommended(self, policy):
- return False
-
- def WritePolicy(self, policy):
- name = policy['name']
- self.AddStringResource(name + 'Title', policy['caption'])
-
- # Get the policy description.
- description = policy['desc']
- self.AddStringResource(name + 'Desc', description)
-
- items = policy.get('items')
- if items is not None:
- entries = [ item['caption'] for item in items ]
- values = [ item['value'] for item in items ]
- self.AddStringArrayResource(name + 'Entries', entries)
- self.AddStringArrayResource(name + 'Values', values)
-
- def BeginTemplate(self):
- comment_text = 'DO NOT MODIFY THIS FILE DIRECTLY!\n' \
- 'IT IS GENERATED FROM policy_templates.json.'
- if self._GetChromiumVersionString():
- comment_text += '\n' + self.config['build'] + ' version: '\
- + self._GetChromiumVersionString()
- comment_node = self._doc.createComment(comment_text)
- self._doc.insertBefore(comment_node, self._resources)
-
- def Init(self):
- impl = minidom.getDOMImplementation()
- self._doc = impl.createDocument(None, 'resources', None)
- self._resources = self._doc.documentElement
-
- def GetTemplateText(self):
- return self.ToPrettyXml(self._doc)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer_unittest.py
deleted file mode 100755
index 18868c1ce5e..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/android_policy_writer_unittest.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.android_policy_writer'''
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-from xml.dom import minidom
-
-from grit.format.policy_templates.writers import writer_unittest_common
-from grit.format.policy_templates.writers import android_policy_writer
-
-
-class AndroidPolicyWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests to test assumptions in Android Policy Writer'''
-
- def testPolicyWithoutItems(self):
- # Test an example policy without items.
- policy = {
- 'name': '_policy_name',
- 'caption': '_policy_caption',
- 'desc': 'This is a long policy caption. More than one sentence '
- 'in a single line because it is very important.\n'
- 'Second line, also important'
- }
- writer = android_policy_writer.GetWriter({})
- writer.Init()
- writer.BeginTemplate()
- writer.WritePolicy(policy)
- self.assertEquals(
- writer._resources.toxml(),
- '<resources>'
- '<string name="_policy_nameTitle">_policy_caption</string>'
- '<string name="_policy_nameDesc">This is a long policy caption. More '
- 'than one sentence in a single line because it is very '
- 'important.\nSecond line, also important'
- '</string>'
- '</resources>')
-
- def testPolicyWithItems(self):
- # Test an example policy without items.
- policy = {
- 'name': '_policy_name',
- 'caption': '_policy_caption',
- 'desc': '_policy_desc_first.\nadditional line',
- 'items': [
- {
- 'caption':'_caption1',
- 'value':'_value1',
- },
- {
- 'caption':'_caption2',
- 'value':'_value2',
- }
- ]
- }
- writer = android_policy_writer.GetWriter({})
- writer.Init()
- writer.BeginTemplate()
- writer.WritePolicy(policy)
- self.assertEquals(
- writer._resources.toxml(),
- '<resources>'
- '<string name="_policy_nameTitle">_policy_caption</string>'
- '<string name="_policy_nameDesc">_policy_desc_first.\n'
- 'additional line</string>'
- '<string-array name="_policy_nameEntries">'
- '<item>_caption1</item>'
- '<item>_caption2</item>'
- '</string-array>'
- '<string-array name="_policy_nameValues">'
- '<item>_value1</item>'
- '<item>_value2</item>'
- '</string-array>'
- '</resources>')
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/doc_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/doc_writer.py
deleted file mode 100755
index 9956d828d7f..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/doc_writer.py
+++ /dev/null
@@ -1,758 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import json
-from xml.dom import minidom
-from grit import lazy_re
-from grit.format.policy_templates.writers import xml_formatted_writer
-
-
-def GetWriter(config):
- '''Factory method for creating DocWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return DocWriter(['*'], config)
-
-
-class DocWriter(xml_formatted_writer.XMLFormattedWriter):
- '''Class for generating policy templates in HTML format.
- The intended use of the generated file is to upload it on
- http://dev.chromium.org, therefore its format has some limitations:
- - No HTML and body tags.
- - Restricted set of element attributes: for example no 'class'.
- Because of the latter the output is styled using the 'style'
- attributes of HTML elements. This is supported by the dictionary
- self._STYLES[] and the method self._AddStyledElement(), they try
- to mimic the functionality of CSS classes. (But without inheritance.)
-
- This class is invoked by PolicyTemplateGenerator to create the HTML
- files.
- '''
-
- def _GetLocalizedMessage(self, msg_id):
- '''Returns a localized message for this writer.
-
- Args:
- msg_id: The identifier of the message.
-
- Returns:
- The localized message.
- '''
- return self.messages['doc_' + msg_id]['text']
-
- def _MapListToString(self, item_map, items):
- '''Creates a comma-separated list.
-
- Args:
- item_map: A dictionary containing all the elements of 'items' as
- keys.
- items: A list of arbitrary items.
-
- Returns:
- Looks up each item of 'items' in 'item_maps' and concatenates the
- resulting items into a comma-separated list.
- '''
- return ', '.join([item_map[x] for x in items])
-
- def _AddTextWithLinks(self, parent, text):
- '''Parse a string for URLs and add it to a DOM node with the URLs replaced
- with <a> HTML links.
-
- Args:
- parent: The DOM node to which the text will be added.
- text: The string to be added.
- '''
- # A simple regexp to search for URLs. It is enough for now.
- url_matcher = lazy_re.compile('(http://[^\\s]*[^\\s\\.])')
-
- # Iterate through all the URLs and replace them with links.
- while True:
- # Look for the first URL.
- res = url_matcher.search(text)
- if not res:
- break
- # Calculate positions of the substring of the URL.
- url = res.group(0)
- start = res.start(0)
- end = res.end(0)
- # Add the text prior to the URL.
- self.AddText(parent, text[:start])
- # Add a link for the URL.
- self.AddElement(parent, 'a', {'href': url}, url)
- # Drop the part of text that is added.
- text = text[end:]
- self.AddText(parent, text)
-
- def _AddParagraphs(self, parent, text):
- '''Break description into paragraphs and replace URLs with links.
-
- Args:
- parent: The DOM node to which the text will be added.
- text: The string to be added.
- '''
- # Split text into list of paragraphs.
- entries = text.split('\n\n')
- for entry in entries:
- # Create a new paragraph node.
- paragraph = self.AddElement(parent, 'p')
- # Insert text to the paragraph with processing the URLs.
- self._AddTextWithLinks(paragraph, entry)
-
- def _AddStyledElement(self, parent, name, style_ids, attrs=None, text=None):
- '''Adds an XML element to a parent, with CSS style-sheets included.
-
- Args:
- parent: The parent DOM node.
- name: Name of the element to add.
- style_ids: A list of CSS style strings from self._STYLE[].
- attrs: Dictionary of attributes for the element.
- text: Text content for the element.
- '''
- if attrs == None:
- attrs = {}
-
- style = ''.join([self._STYLE[x] for x in style_ids])
- if style != '':
- # Apply the style specified by style_ids.
- attrs['style'] = style + attrs.get('style', '')
- return self.AddElement(parent, name, attrs, text)
-
- def _AddDescription(self, parent, policy):
- '''Adds a string containing the description of the policy. URLs are
- replaced with links and the possible choices are enumerated in case
- of 'string-enum' and 'int-enum' type policies.
-
- Args:
- parent: The DOM node for which the feature list will be added.
- policy: The data structure of a policy.
- '''
- # Add description by paragraphs (URLs will be substituted by links).
- self._AddParagraphs(parent, policy['desc'])
- # Add list of enum items.
- if policy['type'] in ('string-enum', 'int-enum', 'string-enum-list'):
- ul = self.AddElement(parent, 'ul')
- for item in policy['items']:
- if policy['type'] == 'int-enum':
- value_string = str(item['value'])
- else:
- value_string = '"%s"' % item['value']
- self.AddElement(
- ul, 'li', {}, '%s = %s' % (value_string, item['caption']))
-
- def _AddFeatures(self, parent, policy):
- '''Adds a string containing the list of supported features of a policy
- to a DOM node. The text will look like as:
- Feature_X: Yes, Feature_Y: No
-
- Args:
- parent: The DOM node for which the feature list will be added.
- policy: The data structure of a policy.
- '''
- features = []
- # The sorting is to make the order well-defined for testing.
- keys = policy['features'].keys()
- keys.sort()
- for key in keys:
- key_name = self._FEATURE_MAP[key]
- if policy['features'][key]:
- value_name = self._GetLocalizedMessage('supported')
- else:
- value_name = self._GetLocalizedMessage('not_supported')
- features.append('%s: %s' % (key_name, value_name))
- self.AddText(parent, ', '.join(features))
-
- def _AddListExampleMac(self, parent, policy):
- '''Adds an example value for Mac of a 'list' policy to a DOM node.
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: A policy of type 'list', for which the Mac example value
- is generated.
- '''
- example_value = policy['example_value']
- self.AddElement(parent, 'dt', {}, 'Mac:')
- mac = self._AddStyledElement(parent, 'dd', ['.monospace', '.pre'])
-
- mac_text = ['<array>']
- for item in example_value:
- mac_text.append(' <string>%s</string>' % item)
- mac_text.append('</array>')
- self.AddText(mac, '\n'.join(mac_text))
-
- def _AddListExampleWindows(self, parent, policy):
- '''Adds an example value for Windows of a 'list' policy to a DOM node.
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: A policy of type 'list', for which the Windows example value
- is generated.
- '''
- example_value = policy['example_value']
- self.AddElement(parent, 'dt', {}, 'Windows:')
- win = self._AddStyledElement(parent, 'dd', ['.monospace', '.pre'])
- win_text = []
- cnt = 1
- if self.CanBeRecommended(policy) and not self.CanBeMandatory(policy):
- key_name = self.config['win_reg_recommended_key_name']
- else:
- key_name = self.config['win_reg_mandatory_key_name']
- for item in example_value:
- win_text.append(
- '%s\\%s\\%d = "%s"' %
- (key_name, policy['name'], cnt, item))
- cnt = cnt + 1
- self.AddText(win, '\n'.join(win_text))
-
- def _AddListExampleAndroidLinux(self, parent, policy):
- '''Adds an example value for Android/Linux of a 'list' policy to a DOM node.
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: A policy of type 'list', for which the Android/Linux example value
- is generated.
- '''
- example_value = policy['example_value']
- self.AddElement(parent, 'dt', {}, 'Android/Linux:')
- element = self._AddStyledElement(parent, 'dd', ['.monospace'])
- text = []
- for item in example_value:
- text.append('"%s"' % item)
- self.AddText(element, '[%s]' % ', '.join(text))
-
- def _AddListExample(self, parent, policy):
- '''Adds the example value of a 'list' policy to a DOM node. Example output:
- <dl>
- <dt>Windows:</dt>
- <dd>
- Software\Policies\Chromium\DisabledPlugins\0 = "Java"
- Software\Policies\Chromium\DisabledPlugins\1 = "Shockwave Flash"
- </dd>
- <dt>Android/Linux:</dt>
- <dd>["Java", "Shockwave Flash"]</dd>
- <dt>Mac:</dt>
- <dd>
- <array>
- <string>Java</string>
- <string>Shockwave Flash</string>
- </array>
- </dd>
- </dl>
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: The data structure of a policy.
- '''
- examples = self._AddStyledElement(parent, 'dl', ['dd dl'])
- if self.IsPolicySupportedOnPlatform(policy, 'win'):
- self._AddListExampleWindows(examples, policy)
- if (self.IsPolicySupportedOnPlatform(policy, 'android') or
- self.IsPolicySupportedOnPlatform(policy, 'linux')):
- self._AddListExampleAndroidLinux(examples, policy)
- if self.IsPolicySupportedOnPlatform(policy, 'mac'):
- self._AddListExampleMac(examples, policy)
-
- def _PythonObjectToPlist(self, obj, indent=''):
- '''Converts a python object to an equivalent XML plist.
-
- Returns a list of lines.'''
- obj_type = type(obj)
- if obj_type == bool:
- return [ '%s<%s/>' % (indent, 'true' if obj else 'false') ]
- elif obj_type == int:
- return [ '%s<integer>%s</integer>' % (indent, obj) ]
- elif obj_type == str:
- return [ '%s<string>%s</string>' % (indent, obj) ]
- elif obj_type == list:
- result = [ '%s<array>' % indent ]
- for item in obj:
- result += self._PythonObjectToPlist(item, indent + ' ')
- result.append('%s</array>' % indent)
- return result
- elif obj_type == dict:
- result = [ '%s<dict>' % indent ]
- for key in sorted(obj.keys()):
- result.append('%s<key>%s</key>' % (indent + ' ', key))
- result += self._PythonObjectToPlist(obj[key], indent + ' ')
- result.append('%s</dict>' % indent)
- return result
- else:
- raise Exception('Invalid object to convert: %s' % obj)
-
- def _AddDictionaryExampleMac(self, parent, policy):
- '''Adds an example value for Mac of a 'dict' policy to a DOM node.
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: A policy of type 'dict', for which the Mac example value
- is generated.
- '''
- example_value = policy['example_value']
- self.AddElement(parent, 'dt', {}, 'Mac:')
- mac = self._AddStyledElement(parent, 'dd', ['.monospace', '.pre'])
- mac_text = ['<key>%s</key>' % (policy['name'])]
- mac_text += self._PythonObjectToPlist(example_value)
- self.AddText(mac, '\n'.join(mac_text))
-
- def _AddDictionaryExampleWindows(self, parent, policy):
- '''Adds an example value for Windows of a 'dict' policy to a DOM node.
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: A policy of type 'dict', for which the Windows example value
- is generated.
- '''
- self.AddElement(parent, 'dt', {}, 'Windows:')
- win = self._AddStyledElement(parent, 'dd', ['.monospace', '.pre'])
- if self.CanBeRecommended(policy) and not self.CanBeMandatory(policy):
- key_name = self.config['win_reg_recommended_key_name']
- else:
- key_name = self.config['win_reg_mandatory_key_name']
- example = json.dumps(policy['example_value'])
- self.AddText(win, '%s\\%s = %s' % (key_name, policy['name'], example))
-
- def _AddDictionaryExampleAndroidLinux(self, parent, policy):
- '''Adds an example value for Android/Linux of a 'dict' policy to a DOM node.
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: A policy of type 'dict', for which the Android/Linux example value
- is generated.
- '''
- self.AddElement(parent, 'dt', {}, 'Android/Linux:')
- element = self._AddStyledElement(parent, 'dd', ['.monospace'])
- example = json.dumps(policy['example_value'])
- self.AddText(element, '%s: %s' % (policy['name'], example))
-
- def _AddDictionaryExample(self, parent, policy):
- '''Adds the example value of a 'dict' policy to a DOM node. Example output:
- <dl>
- <dt>Windows:</dt>
- <dd>
- Software\Policies\Chromium\ProxySettings = "{ 'ProxyMode': 'direct' }"
- </dd>
- <dt>Android/Linux:</dt>
- <dd>"ProxySettings": {
- "ProxyMode": "direct"
- }
- </dd>
- <dt>Mac:</dt>
- <dd>
- <key>ProxySettings</key>
- <dict>
- <key>ProxyMode</key>
- <string>direct</string>
- </dict>
- </dd>
- </dl>
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: The data structure of a policy.
- '''
- examples = self._AddStyledElement(parent, 'dl', ['dd dl'])
- if self.IsPolicySupportedOnPlatform(policy, 'win'):
- self._AddDictionaryExampleWindows(examples, policy)
- if (self.IsPolicySupportedOnPlatform(policy, 'android') or
- self.IsPolicySupportedOnPlatform(policy, 'linux')):
- self._AddDictionaryExampleAndroidLinux(examples, policy)
- if self.IsPolicySupportedOnPlatform(policy, 'mac'):
- self._AddDictionaryExampleMac(examples, policy)
-
- def _AddExample(self, parent, policy):
- '''Adds the HTML DOM representation of the example value of a policy to
- a DOM node. It is simple text for boolean policies, like
- '0x00000001 (Windows), true (Linux), true (Android), <true /> (Mac)'
- in case of boolean policies, but it may also contain other HTML elements.
- (See method _AddListExample.)
-
- Args:
- parent: The DOM node for which the example will be added.
- policy: The data structure of a policy.
-
- Raises:
- Exception: If the type of the policy is unknown or the example value
- of the policy is out of its expected range.
- '''
- example_value = policy['example_value']
- policy_type = policy['type']
- if policy_type == 'main':
- pieces = []
- if self.IsPolicySupportedOnPlatform(policy, 'win'):
- value = '0x00000001' if example_value else '0x00000000'
- pieces.append(value + ' (Windows)')
- if self.IsPolicySupportedOnPlatform(policy, 'linux'):
- value = 'true' if example_value else 'false'
- pieces.append(value + ' (Linux)')
- if self.IsPolicySupportedOnPlatform(policy, 'android'):
- value = 'true' if example_value else 'false'
- pieces.append(value + ' (Android)')
- if self.IsPolicySupportedOnPlatform(policy, 'mac'):
- value = '<true />' if example_value else '<false />'
- pieces.append(value + ' (Mac)')
- self.AddText(parent, ', '.join(pieces))
- elif policy_type == 'string':
- self.AddText(parent, '"%s"' % example_value)
- elif policy_type in ('int', 'int-enum'):
- pieces = []
- if self.IsPolicySupportedOnPlatform(policy, 'win'):
- pieces.append('0x%08x (Windows)' % example_value)
- if self.IsPolicySupportedOnPlatform(policy, 'linux'):
- pieces.append('%d (Linux)' % example_value)
- if self.IsPolicySupportedOnPlatform(policy, 'android'):
- pieces.append('%d (Android)' % example_value)
- if self.IsPolicySupportedOnPlatform(policy, 'mac'):
- pieces.append('%d (Mac)' % example_value)
- self.AddText(parent, ', '.join(pieces))
- elif policy_type == 'string-enum':
- self.AddText(parent, '"%s"' % (example_value))
- elif policy_type in ('list', 'string-enum-list'):
- self._AddListExample(parent, policy)
- elif policy_type == 'dict':
- self._AddDictionaryExample(parent, policy)
- else:
- raise Exception('Unknown policy type: ' + policy_type)
-
- def _AddPolicyAttribute(self, dl, term_id,
- definition=None, definition_style=None):
- '''Adds a term-definition pair to a HTML DOM <dl> node. This method is
- used by _AddPolicyDetails. Its result will have the form of:
- <dt style="...">...</dt>
- <dd style="...">...</dd>
-
- Args:
- dl: The DOM node of the <dl> list.
- term_id: A key to self._STRINGS[] which specifies the term of the pair.
- definition: The text of the definition. (Optional.)
- definition_style: List of references to values self._STYLE[] that specify
- the CSS stylesheet of the <dd> (definition) element.
-
- Returns:
- The DOM node representing the definition <dd> element.
- '''
- # Avoid modifying the default value of definition_style.
- if definition_style == None:
- definition_style = []
- term = self._GetLocalizedMessage(term_id)
- self._AddStyledElement(dl, 'dt', ['dt'], {}, term)
- return self._AddStyledElement(dl, 'dd', definition_style, {}, definition)
-
- def _AddSupportedOnList(self, parent, supported_on_list):
- '''Creates a HTML list containing the platforms, products and versions
- that are specified in the list of supported_on.
-
- Args:
- parent: The DOM node for which the list will be added.
- supported_on_list: The list of supported products, as a list of
- dictionaries.
- '''
- ul = self._AddStyledElement(parent, 'ul', ['ul'])
- for supported_on in supported_on_list:
- text = []
- product = supported_on['product']
- platforms = supported_on['platforms']
- text.append(self._PRODUCT_MAP[product])
- text.append('(%s)' %
- self._MapListToString(self._PLATFORM_MAP, platforms))
- if supported_on['since_version']:
- since_version = self._GetLocalizedMessage('since_version')
- text.append(since_version.replace('$6', supported_on['since_version']))
- if supported_on['until_version']:
- until_version = self._GetLocalizedMessage('until_version')
- text.append(until_version.replace('$6', supported_on['until_version']))
- # Add the list element:
- self.AddElement(ul, 'li', {}, ' '.join(text))
-
- def _AddPolicyDetails(self, parent, policy):
- '''Adds the list of attributes of a policy to the HTML DOM node parent.
- It will have the form:
- <dl>
- <dt>Attribute:</dt><dd>Description</dd>
- ...
- </dl>
-
- Args:
- parent: A DOM element for which the list will be added.
- policy: The data structure of the policy.
- '''
-
- dl = self.AddElement(parent, 'dl')
- data_type = [self._TYPE_MAP[policy['type']]]
- qualified_types = []
- is_complex_policy = False
- if (self.IsPolicySupportedOnPlatform(policy, 'android') and
- self._RESTRICTION_TYPE_MAP.get(policy['type'], None)):
- qualified_types.append('Android:%s' %
- self._RESTRICTION_TYPE_MAP[policy['type']])
- if policy['type'] in ('dict', 'list'):
- is_complex_policy = True
- if (self.IsPolicySupportedOnPlatform(policy, 'win') and
- self._REG_TYPE_MAP.get(policy['type'], None)):
- qualified_types.append('Windows:%s' % self._REG_TYPE_MAP[policy['type']])
- if policy['type'] == 'dict':
- is_complex_policy = True
- if qualified_types:
- data_type.append('[%s]' % ', '.join(qualified_types))
- if is_complex_policy:
- data_type.append('(%s)' %
- self._GetLocalizedMessage('complex_policies_on_windows'))
- self._AddPolicyAttribute(dl, 'data_type', ' '.join(data_type))
- if policy['type'] != 'external':
- # All types except 'external' can be set through platform policy.
- if self.IsPolicySupportedOnPlatform(policy, 'win'):
- if self.CanBeRecommended(policy) and not self.CanBeMandatory(policy):
- key_name = self.config['win_reg_recommended_key_name']
- else:
- key_name = self.config['win_reg_mandatory_key_name']
- self._AddPolicyAttribute(
- dl,
- 'win_reg_loc',
- key_name + '\\' + policy['name'],
- ['.monospace'])
- if (self.IsPolicySupportedOnPlatform(policy, 'linux') or
- self.IsPolicySupportedOnPlatform(policy, 'mac')):
- self._AddPolicyAttribute(
- dl,
- 'mac_linux_pref_name',
- policy['name'],
- ['.monospace'])
- if self.IsPolicySupportedOnPlatform(policy, 'android', 'chrome'):
- self._AddPolicyAttribute(
- dl,
- 'android_restriction_name',
- policy['name'],
- ['.monospace'])
- if self.IsPolicySupportedOnPlatform(policy, 'android', 'webview'):
- restriction_prefix = self.config['android_webview_restriction_prefix']
- self._AddPolicyAttribute(
- dl,
- 'android_webview_restriction_name',
- restriction_prefix + policy['name'],
- ['.monospace'])
- dd = self._AddPolicyAttribute(dl, 'supported_on')
- self._AddSupportedOnList(dd, policy['supported_on'])
- dd = self._AddPolicyAttribute(dl, 'supported_features')
- self._AddFeatures(dd, policy)
- dd = self._AddPolicyAttribute(dl, 'description')
- self._AddDescription(dd, policy)
- if 'arc_support' in policy:
- dd = self._AddPolicyAttribute(dl, 'arc_support')
- self._AddParagraphs(dd, policy['arc_support'])
- if (self.IsPolicySupportedOnPlatform(policy, 'win') or
- self.IsPolicySupportedOnPlatform(policy, 'linux') or
- self.IsPolicySupportedOnPlatform(policy, 'android') or
- self.IsPolicySupportedOnPlatform(policy, 'mac')):
- # Don't add an example for ChromeOS-only policies.
- if policy['type'] != 'external':
- # All types except 'external' can be set through platform policy.
- dd = self._AddPolicyAttribute(dl, 'example_value')
- self._AddExample(dd, policy)
-
- def _AddPolicyNote(self, parent, policy):
- '''If a policy has an additional web page assigned with it, then add
- a link for that page.
-
- Args:
- policy: The data structure of the policy.
- '''
- if 'problem_href' not in policy:
- return
- problem_href = policy['problem_href']
- div = self._AddStyledElement(parent, 'div', ['div.note'])
- note = self._GetLocalizedMessage('note').replace('$6', problem_href)
- self._AddParagraphs(div, note)
-
- def _AddPolicyRow(self, parent, policy):
- '''Adds a row for the policy in the summary table.
-
- Args:
- parent: The DOM node of the summary table.
- policy: The data structure of the policy.
- '''
- tr = self._AddStyledElement(parent, 'tr', ['tr'])
- indent = 'padding-left: %dpx;' % (7 + self._indent_level * 14)
- if policy['type'] != 'group':
- # Normal policies get two columns with name and caption.
- name_td = self._AddStyledElement(tr, 'td', ['td', 'td.left'],
- {'style': indent})
- self.AddElement(name_td, 'a',
- {'href': '#' + policy['name']}, policy['name'])
- self._AddStyledElement(tr, 'td', ['td', 'td.right'], {},
- policy['caption'])
- else:
- # Groups get one column with caption.
- name_td = self._AddStyledElement(tr, 'td', ['td', 'td.left'],
- {'style': indent, 'colspan': '2'})
- self.AddElement(name_td, 'a', {'href': '#' + policy['name']},
- policy['caption'])
-
- def _AddPolicySection(self, parent, policy):
- '''Adds a section about the policy in the detailed policy listing.
-
- Args:
- parent: The DOM node of the <div> of the detailed policy list.
- policy: The data structure of the policy.
- '''
- # Set style according to group nesting level.
- indent = 'margin-left: %dpx' % (self._indent_level * 28)
- if policy['type'] == 'group':
- heading = 'h2'
- else:
- heading = 'h3'
- parent2 = self.AddElement(parent, 'div', {'style': indent})
-
- h2 = self.AddElement(parent2, heading)
- self.AddElement(h2, 'a', {'name': policy['name']})
- if policy['type'] != 'group':
- # Normal policies get a full description.
- policy_name_text = policy['name']
- if 'deprecated' in policy and policy['deprecated'] == True:
- policy_name_text += " ("
- policy_name_text += self._GetLocalizedMessage('deprecated') + ")"
- self.AddText(h2, policy_name_text)
- self.AddElement(parent2, 'span', {}, policy['caption'])
- self._AddPolicyNote(parent2, policy)
- self._AddPolicyDetails(parent2, policy)
- else:
- # Groups get a more compact description.
- self.AddText(h2, policy['caption'])
- self._AddStyledElement(parent2, 'div', ['div.group_desc'],
- {}, policy['desc'])
- self.AddElement(
- parent2, 'a', {'href': '#top'},
- self._GetLocalizedMessage('back_to_top'))
-
- #
- # Implementation of abstract methods of TemplateWriter:
- #
-
- def IsDeprecatedPolicySupported(self, policy):
- return True
-
- def WritePolicy(self, policy):
- self._AddPolicyRow(self._summary_tbody, policy)
- self._AddPolicySection(self._details_div, policy)
-
- def BeginPolicyGroup(self, group):
- self.WritePolicy(group)
- self._indent_level += 1
-
- def EndPolicyGroup(self):
- self._indent_level -= 1
-
- def BeginTemplate(self):
- # Add a <div> for the summary section.
- if self._GetChromiumVersionString() is not None:
- self.AddComment(self._main_div, self.config['build'] + \
- ' version: ' + self._GetChromiumVersionString())
-
- summary_div = self.AddElement(self._main_div, 'div')
- self.AddElement(summary_div, 'a', {'name': 'top'})
- self.AddElement(summary_div, 'br')
- self._AddParagraphs(
- summary_div,
- self._GetLocalizedMessage('intro'))
- self.AddElement(summary_div, 'br')
- self.AddElement(summary_div, 'br')
- self.AddElement(summary_div, 'br')
- # Add the summary table of policies.
- summary_table = self._AddStyledElement(summary_div, 'table', ['table'])
- # Add the first row.
- thead = self.AddElement(summary_table, 'thead')
- tr = self._AddStyledElement(thead, 'tr', ['tr'])
- self._AddStyledElement(
- tr, 'td', ['td', 'td.left', 'thead td'], {},
- self._GetLocalizedMessage('name_column_title'))
- self._AddStyledElement(
- tr, 'td', ['td', 'td.right', 'thead td'], {},
- self._GetLocalizedMessage('description_column_title'))
- self._summary_tbody = self.AddElement(summary_table, 'tbody')
-
- # Add a <div> for the detailed policy listing.
- self._details_div = self.AddElement(self._main_div, 'div')
-
- def Init(self):
- dom_impl = minidom.getDOMImplementation('')
- self._doc = dom_impl.createDocument(None, 'html', None)
- body = self.AddElement(self._doc.documentElement, 'body')
- self._main_div = self.AddElement(body, 'div')
- self._indent_level = 0
-
- # Human-readable names of supported platforms.
- self._PLATFORM_MAP = {
- 'win': 'Windows',
- 'mac': 'Mac',
- 'linux': 'Linux',
- 'chrome_os': self.config['os_name'],
- 'android': 'Android',
- }
- # Human-readable names of supported products.
- self._PRODUCT_MAP = {
- 'chrome': self.config['app_name'],
- 'chrome_frame': self.config['frame_name'],
- 'chrome_os': self.config['os_name'],
- 'webview': self.config['webview_name'],
- }
- # Human-readable names of supported features. Each supported feature has
- # a 'doc_feature_X' entry in |self.messages|.
- self._FEATURE_MAP = {}
- for message in self.messages:
- if message.startswith('doc_feature_'):
- self._FEATURE_MAP[message[12:]] = self.messages[message]['text']
- # Human-readable names of types.
- self._TYPE_MAP = {
- 'string': 'String',
- 'int': 'Integer',
- 'main': 'Boolean',
- 'int-enum': 'Integer',
- 'string-enum': 'String',
- 'list': 'List of strings',
- 'string-enum-list': 'List of strings',
- 'dict': 'Dictionary',
- 'external': 'External data reference',
- }
- self._REG_TYPE_MAP = {
- 'string': 'REG_SZ',
- 'int': 'REG_DWORD',
- 'main': 'REG_DWORD',
- 'int-enum': 'REG_DWORD',
- 'string-enum': 'REG_SZ',
- 'dict': 'REG_SZ',
- }
- self._RESTRICTION_TYPE_MAP = {
- 'int-enum': 'choice',
- 'string-enum': 'choice',
- 'list': 'string',
- 'string-enum-list': 'multi-select',
- 'dict': 'string',
- }
- # The CSS style-sheet used for the document. It will be used in Google
- # Sites, which strips class attributes from HTML tags. To work around this,
- # the style-sheet is a dictionary and the style attributes will be added
- # "by hand" for each element.
- self._STYLE = {
- 'table': 'border-style: none; border-collapse: collapse;',
- 'tr': 'height: 0px;',
- 'td': 'border: 1px dotted rgb(170, 170, 170); padding: 7px; '
- 'vertical-align: top; width: 236px; height: 15px;',
- 'thead td': 'font-weight: bold;',
- 'td.left': 'width: 200px;',
- 'td.right': 'width: 100%;',
- 'dt': 'font-weight: bold;',
- 'dd dl': 'margin-top: 0px; margin-bottom: 0px;',
- '.monospace': 'font-family: monospace;',
- '.pre': 'white-space: pre;',
- 'div.note': 'border: 2px solid black; padding: 5px; margin: 5px;',
- 'div.group_desc': 'margin-top: 20px; margin-bottom: 20px;',
- 'ul': 'padding-left: 0px; margin-left: 0px;'
- }
-
-
- def GetTemplateText(self):
- # Return the text representation of the main <div> tag.
- return self._main_div.toxml()
- # To get a complete HTML file, use the following.
- # return self._doc.toxml()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/doc_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/doc_writer_unittest.py
deleted file mode 100755
index 91c2c763849..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/doc_writer_unittest.py
+++ /dev/null
@@ -1,996 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.doc_writer'''
-
-
-import json
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-from xml.dom import minidom
-
-from grit.format.policy_templates.writers import writer_unittest_common
-from grit.format.policy_templates.writers import doc_writer
-
-
-class MockMessageDictionary:
- '''A mock dictionary passed to a writer as the dictionary of
- localized messages.
- '''
-
- # Dictionary of messages.
- msg_dict = {}
-
-class DocWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests for DocWriter.'''
-
- def setUp(self):
- # Create a writer for the tests.
- self.writer = doc_writer.GetWriter(
- config={
- 'app_name': 'Chrome',
- 'frame_name': 'Chrome Frame',
- 'os_name': 'Chrome OS',
- 'webview_name': 'WebView',
- 'android_webview_restriction_prefix': 'mock.prefix:',
- 'win_reg_mandatory_key_name': 'MockKey',
- 'win_reg_recommended_key_name': 'MockKeyRec',
- 'build': 'test_product',
- })
- self.writer.messages = {
- 'doc_back_to_top': {'text': '_test_back_to_top'},
- 'doc_complex_policies_on_windows': {'text': '_test_complex_policies_win'},
- 'doc_data_type': {'text': '_test_data_type'},
- 'doc_description': {'text': '_test_description'},
- 'doc_arc_support': {'text': '_test_arc_support'},
- 'doc_description_column_title': {
- 'text': '_test_description_column_title'
- },
- 'doc_example_value': {'text': '_test_example_value'},
- 'doc_feature_dynamic_refresh': {'text': '_test_feature_dynamic_refresh'},
- 'doc_feature_can_be_recommended': {'text': '_test_feature_recommended'},
- 'doc_feature_can_be_mandatory': {'text': '_test_feature_mandatory'},
- 'doc_intro': {'text': '_test_intro'},
- 'doc_mac_linux_pref_name': {'text': '_test_mac_linux_pref_name'},
- 'doc_android_restriction_name': {
- 'text': '_test_android_restriction_name'
- },
- 'doc_android_webview_restriction_name': {
- 'text': '_test_android_webview_restriction_name'
- },
- 'doc_note': {'text': '_test_note'},
- 'doc_name_column_title': {'text': '_test_name_column_title'},
- 'doc_not_supported': {'text': '_test_not_supported'},
- 'doc_since_version': {'text': '_test_since_version'},
- 'doc_supported': {'text': '_test_supported'},
- 'doc_supported_features': {'text': '_test_supported_features'},
- 'doc_supported_on': {'text': '_test_supported_on'},
- 'doc_win_reg_loc': {'text': '_test_win_reg_loc'},
-
- 'doc_bla': {'text': '_test_bla'},
- }
- self.writer.Init()
-
- # It is not worth testing the exact content of style attributes.
- # Therefore we override them here with shorter texts.
- for key in self.writer._STYLE.keys():
- self.writer._STYLE[key] = 'style_%s;' % key
- # Add some more style attributes for additional testing.
- self.writer._STYLE['key1'] = 'style1;'
- self.writer._STYLE['key2'] = 'style2;'
-
- # Create a DOM document for the tests.
- dom_impl = minidom.getDOMImplementation('')
- self.doc = dom_impl.createDocument(None, 'root', None)
- self.doc_root = self.doc.documentElement
-
- def testSkeleton(self):
- # Test if DocWriter creates the skeleton of the document correctly.
- self.writer.BeginTemplate()
- self.assertEquals(
- self.writer._main_div.toxml(),
- '<div>'
- '<div>'
- '<a name="top"/><br/><p>_test_intro</p><br/><br/><br/>'
- '<table style="style_table;">'
- '<thead><tr style="style_tr;">'
- '<td style="style_td;style_td.left;style_thead td;">'
- '_test_name_column_title'
- '</td>'
- '<td style="style_td;style_td.right;style_thead td;">'
- '_test_description_column_title'
- '</td>'
- '</tr></thead>'
- '<tbody/>'
- '</table>'
- '</div>'
- '<div/>'
- '</div>')
-
- def testVersionAnnotation(self):
- # Test if DocWriter creates the skeleton of the document correctly.
- self.writer.config['version'] = '39.0.0.0'
- self.writer.BeginTemplate()
- self.assertEquals(
- self.writer._main_div.toxml(),
- '<div>'
- '<!--test_product version: 39.0.0.0-->'
- '<div>'
- '<a name="top"/><br/><p>_test_intro</p><br/><br/><br/>'
- '<table style="style_table;">'
- '<thead><tr style="style_tr;">'
- '<td style="style_td;style_td.left;style_thead td;">'
- '_test_name_column_title'
- '</td>'
- '<td style="style_td;style_td.right;style_thead td;">'
- '_test_description_column_title'
- '</td>'
- '</tr></thead>'
- '<tbody/>'
- '</table>'
- '</div>'
- '<div/>'
- '</div>')
-
- def testGetLocalizedMessage(self):
- # Test if localized messages are retrieved correctly.
- self.writer.messages = {
- 'doc_hello_world': {'text': 'hello, vilag!'}
- }
- self.assertEquals(
- self.writer._GetLocalizedMessage('hello_world'),
- 'hello, vilag!')
-
- def testMapListToString(self):
- # Test function DocWriter.MapListToString()
- self.assertEquals(
- self.writer._MapListToString({'a1': 'a2', 'b1': 'b2'}, ['a1', 'b1']),
- 'a2, b2')
- self.assertEquals(
- self.writer._MapListToString({'a1': 'a2', 'b1': 'b2'}, []),
- '')
- result = self.writer._MapListToString(
- {'a': '1', 'b': '2', 'c': '3', 'd': '4'}, ['b', 'd'])
- expected_result = '2, 4'
- self.assertEquals(
- result,
- expected_result)
-
- def testAddStyledElement(self):
- # Test function DocWriter.AddStyledElement()
-
- # Test the case of zero style.
- e1 = self.writer._AddStyledElement(
- self.doc_root, 'z', [], {'a': 'b'}, 'text')
- self.assertEquals(
- e1.toxml(),
- '<z a="b">text</z>')
-
- # Test the case of one style.
- e2 = self.writer._AddStyledElement(
- self.doc_root, 'z', ['key1'], {'a': 'b'}, 'text')
- self.assertEquals(
- e2.toxml(),
- '<z a="b" style="style1;">text</z>')
-
- # Test the case of two styles.
- e3 = self.writer._AddStyledElement(
- self.doc_root, 'z', ['key1', 'key2'], {'a': 'b'}, 'text')
- self.assertEquals(
- e3.toxml(),
- '<z a="b" style="style1;style2;">text</z>')
-
- def testAddDescriptionIntEnum(self):
- # Test if URLs are replaced and choices of 'int-enum' policies are listed
- # correctly.
- policy = {
- 'type': 'int-enum',
- 'items': [
- {'value': 0, 'caption': 'Disable foo'},
- {'value': 2, 'caption': 'Solve your problem'},
- {'value': 5, 'caption': 'Enable bar'},
- ],
- 'desc': '''This policy disables foo, except in case of bar.
-See http://policy-explanation.example.com for more details.
-'''
- }
- self.writer._AddDescription(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '''<root><p>This policy disables foo, except in case of bar.
-See <a href="http://policy-explanation.example.com">http://policy-explanation.example.com</a> for more details.
-</p><ul><li>0 = Disable foo</li><li>2 = Solve your problem</li><li>5 = Enable bar</li></ul></root>''')
-
- def testAddDescriptionStringEnum(self):
- # Test if URLs are replaced and choices of 'int-enum' policies are listed
- # correctly.
- policy = {
- 'type': 'string-enum',
- 'items': [
- {'value': "one", 'caption': 'Disable foo'},
- {'value': "two", 'caption': 'Solve your problem'},
- {'value': "three", 'caption': 'Enable bar'},
- ],
- 'desc': '''This policy disables foo, except in case of bar.
-See http://policy-explanation.example.com for more details.
-'''
- }
- self.writer._AddDescription(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '''<root><p>This policy disables foo, except in case of bar.
-See <a href="http://policy-explanation.example.com">http://policy-explanation.example.com</a> for more details.
-</p><ul><li>&quot;one&quot; = Disable foo</li><li>&quot;two&quot; = Solve your problem</li><li>&quot;three&quot; = Enable bar</li></ul></root>''')
-
- def testAddFeatures(self):
- # Test if the list of features of a policy is handled correctly.
- policy = {
- 'features': {
- 'spaceship_docking': False,
- 'dynamic_refresh': True,
- 'can_be_recommended': True,
- }
- }
- self.writer._FEATURE_MAP = {
- 'can_be_recommended': 'Can Be Recommended',
- 'dynamic_refresh': 'Dynamic Refresh',
- 'spaceship_docking': 'Spaceship Docking',
- }
- self.writer._AddFeatures(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- 'Can Be Recommended: _test_supported, '
- 'Dynamic Refresh: _test_supported, '
- 'Spaceship Docking: _test_not_supported'
- '</root>')
-
- def testAddListExample(self):
- policy = {
- 'name': 'PolicyName',
- 'example_value': ['Foo', 'Bar'],
- 'supported_on': [ { 'platforms': ['win', 'mac', 'linux'] } ]
- }
- self.writer._AddListExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<dl style="style_dd dl;">'
- '<dt>Windows:</dt>'
- '<dd style="style_.monospace;style_.pre;">'
- 'MockKey\\PolicyName\\1 = &quot;Foo&quot;\n'
- 'MockKey\\PolicyName\\2 = &quot;Bar&quot;'
- '</dd>'
- '<dt>Android/Linux:</dt>'
- '<dd style="style_.monospace;">'
- '[&quot;Foo&quot;, &quot;Bar&quot;]'
- '</dd>'
- '<dt>Mac:</dt>'
- '<dd style="style_.monospace;style_.pre;">'
- '&lt;array&gt;\n'
- ' &lt;string&gt;Foo&lt;/string&gt;\n'
- ' &lt;string&gt;Bar&lt;/string&gt;\n'
- '&lt;/array&gt;'
- '</dd>'
- '</dl>'
- '</root>')
-
- def testBoolExample(self):
- # Test representation of boolean example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'main',
- 'example_value': True,
- 'supported_on': [ { 'platforms': ['win', 'mac', 'linux', 'android'] } ]
- }
- e1 = self.writer.AddElement(self.doc_root, 'e1')
- self.writer._AddExample(e1, policy)
- self.assertEquals(
- e1.toxml(),
- '<e1>0x00000001 (Windows),'
- ' true (Linux), true (Android),'
- ' &lt;true /&gt; (Mac)</e1>')
-
- policy = {
- 'name': 'PolicyName',
- 'type': 'main',
- 'example_value': False,
- 'supported_on': [ { 'platforms': ['win', 'mac', 'linux', 'android'] } ]
- }
- e2 = self.writer.AddElement(self.doc_root, 'e2')
- self.writer._AddExample(e2, policy)
- self.assertEquals(
- e2.toxml(),
- '<e2>0x00000000 (Windows),'
- ' false (Linux), false (Android),'
- ' &lt;false /&gt; (Mac)</e2>')
-
- def testIntEnumExample(self):
- # Test representation of 'int-enum' example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'int-enum',
- 'example_value': 16,
- 'supported_on': [ { 'platforms': ['win', 'mac', 'linux', 'android'] } ]
- }
- self.writer._AddExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>0x00000010 (Windows), 16 (Linux), 16 (Android), 16 (Mac)</root>')
-
- def testStringEnumExample(self):
- # Test representation of 'string-enum' example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'string-enum',
- 'example_value': "wacky"
- }
- self.writer._AddExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>&quot;wacky&quot;</root>')
-
- def testListExample(self):
- # Test representation of 'list' example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'list',
- 'example_value': ['one', 'two'],
- 'supported_on': [ { 'platforms': ['linux'] } ]
- }
- self.writer._AddExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><dl style="style_dd dl;">'
- '<dt>Android/Linux:</dt>'
- '<dd style="style_.monospace;">'
- '[&quot;one&quot;, &quot;two&quot;]'
- '</dd></dl></root>')
-
- def testStringEnumListExample(self):
- # Test representation of 'string-enum-list' example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'string-enum-list',
- 'example_value': ['one', 'two'],
- 'supported_on': [ { 'platforms': ['linux'] } ]
- }
- self.writer._AddExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><dl style="style_dd dl;">'
- '<dt>Android/Linux:</dt>'
- '<dd style="style_.monospace;">'
- '[&quot;one&quot;, &quot;two&quot;]'
- '</dd></dl></root>')
-
- def testStringExample(self):
- # Test representation of 'string' example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'string',
- 'example_value': 'awesome-example'
- }
- self.writer._AddExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>&quot;awesome-example&quot;</root>')
-
- def testIntExample(self):
- # Test representation of 'int' example values.
- policy = {
- 'name': 'PolicyName',
- 'type': 'int',
- 'example_value': 26,
- 'supported_on': [ { 'platforms': ['win', 'mac', 'linux', 'android'] } ]
- }
- self.writer._AddExample(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>0x0000001a (Windows), 26 (Linux), 26 (Android), 26 (Mac)</root>')
-
- def testAddPolicyAttribute(self):
- # Test creating a policy attribute term-definition pair.
- self.writer._AddPolicyAttribute(
- self.doc_root, 'bla', 'hello, world', ['key1'])
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<dt style="style_dt;">_test_bla</dt>'
- '<dd style="style1;">hello, world</dd>'
- '</root>')
-
- def testAddPolicyDetails(self):
- # Test if the definition list (<dl>) of policy details is created correctly.
- policy = {
- 'type': 'main',
- 'name': 'TestPolicyName',
- 'caption': 'TestPolicyCaption',
- 'desc': 'TestPolicyDesc',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['win', 'mac', 'linux'],
- 'since_version': '8',
- 'until_version': '',
- }, {
- 'product': 'chrome',
- 'platforms': ['android'],
- 'since_version': '30',
- 'until_version': '',
- }, {
- 'product': 'webview',
- 'platforms': ['android'],
- 'since_version': '47',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': False,
- 'arc_support': 'TestArcSupportNote'
- }
- self.writer.messages['doc_since_version'] = {'text': '...$6...'}
- self.writer._AddPolicyDetails(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Boolean [Windows:REG_DWORD]</dd>'
- '<dt style="style_dt;">_test_win_reg_loc</dt>'
- '<dd style="style_.monospace;">MockKey\TestPolicyName</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">TestPolicyName</dd>'
- '<dt style="style_dt;">_test_android_restriction_name</dt>'
- '<dd style="style_.monospace;">TestPolicyName</dd>'
- '<dt style="style_dt;">_test_android_webview_restriction_name</dt>'
- '<dd style="style_.monospace;">mock.prefix:TestPolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Windows, Mac, Linux) ...8...</li>'
- '<li>Chrome (Android) ...30...</li>'
- '<li>WebView (Android) ...47...</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt><dd><p>TestPolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_arc_support</dt>'
- '<dd><p>TestArcSupportNote</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>0x00000000 (Windows), false (Linux),'
- ' false (Android), &lt;false /&gt; (Mac)</dd>'
- '</dl></root>')
-
- def testAddPolicyDetailsNoArcSupport(self):
- # Test that the entire Android-on-Chrome-OS sub-section is left out when
- # 'arc_support' is not specified.
- policy = {
- 'type': 'main',
- 'name': 'TestPolicyName',
- 'caption': 'TestPolicyCaption',
- 'desc': 'TestPolicyDesc',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['linux'],
- 'since_version': '8',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': False
- }
- self.writer.messages['doc_since_version'] = {'text': '...$6...'}
- self.writer._AddPolicyDetails(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Boolean</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">TestPolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Linux) ...8...</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt>'
- '<dd><p>TestPolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>false (Linux)</dd>'
- '</dl></root>')
-
- def testAddDictPolicyDetails(self):
- # Test if the definition list (<dl>) of policy details is created correctly
- # for 'dict' policies.
- policy = {
- 'type': 'dict',
- 'name': 'TestPolicyName',
- 'caption': 'TestPolicyCaption',
- 'desc': 'TestPolicyDesc',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['win', 'mac', 'linux'],
- 'since_version': '8',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': { 'foo': 123 }
- }
- self.writer.messages['doc_since_version'] = {'text': '...$6...'}
- self.writer._AddPolicyDetails(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Dictionary [Windows:REG_SZ] (_test_complex_policies_win)</dd>'
- '<dt style="style_dt;">_test_win_reg_loc</dt>'
- '<dd style="style_.monospace;">MockKey\TestPolicyName</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">TestPolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Windows, Mac, Linux) ...8...</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt><dd><p>TestPolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>'
- '<dl style="style_dd dl;">'
- '<dt>Windows:</dt>'
- '<dd style="style_.monospace;style_.pre;">MockKey\TestPolicyName = {&quot;foo&quot;: 123}</dd>'
- '<dt>Android/Linux:</dt>'
- '<dd style="style_.monospace;">TestPolicyName: {&quot;foo&quot;: 123}</dd>'
- '<dt>Mac:</dt>'
- '<dd style="style_.monospace;style_.pre;">'
- '&lt;key&gt;TestPolicyName&lt;/key&gt;\n'
- '&lt;dict&gt;\n'
- ' &lt;key&gt;foo&lt;/key&gt;\n'
- ' &lt;integer&gt;123&lt;/integer&gt;\n'
- '&lt;/dict&gt;'
- '</dd>'
- '</dl>'
- '</dd>'
- '</dl></root>')
-
- def testAddPolicyDetailsRecommendedOnly(self):
- policy = {
- 'type': 'main',
- 'name': 'TestPolicyName',
- 'caption': 'TestPolicyCaption',
- 'desc': 'TestPolicyDesc',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['win', 'mac', 'linux'],
- 'since_version': '8',
- 'until_version': '',
- }, {
- 'product': 'chrome',
- 'platforms': ['android'],
- 'since_version': '30',
- 'until_version': '',
- }],
- 'features': {
- 'dynamic_refresh': False,
- 'can_be_mandatory': False,
- 'can_be_recommended': True
- },
- 'example_value': False
- }
- self.writer.messages['doc_since_version'] = {'text': '...$6...'}
- self.writer._AddPolicyDetails(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Boolean [Windows:REG_DWORD]</dd>'
- '<dt style="style_dt;">_test_win_reg_loc</dt>'
- '<dd style="style_.monospace;">MockKeyRec\TestPolicyName</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">TestPolicyName</dd>'
- '<dt style="style_dt;">_test_android_restriction_name</dt>'
- '<dd style="style_.monospace;">TestPolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Windows, Mac, Linux) ...8...</li>'
- '<li>Chrome (Android) ...30...</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_mandatory: _test_not_supported,'
- ' _test_feature_recommended: _test_supported,'
- ' _test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt><dd><p>TestPolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>0x00000000 (Windows), false (Linux),'
- ' false (Android), &lt;false /&gt; (Mac)</dd>'
- '</dl></root>')
-
- def testAddPolicyNote(self):
- # TODO(jkummerow): The functionality tested by this test is currently not
- # used for anything and will probably soon be removed.
- # Test if nodes are correctly added to policies.
- policy = {
- 'problem_href': 'http://www.example.com/5'
- }
- self.writer.messages['doc_note'] = {'text': '...$6...'}
- self.writer._AddPolicyNote(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><div style="style_div.note;"><p>...'
- '<a href="http://www.example.com/5">http://www.example.com/5</a>'
- '...</p></div></root>')
-
- def testAddPolicyRow(self):
- # Test if policies are correctly added to the summary table.
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'type': 'string',
- }
- self.writer._indent_level = 3
- self.writer._AddPolicyRow(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><tr style="style_tr;">'
- '<td style="style_td;style_td.left;padding-left: 49px;">'
- '<a href="#PolicyName">PolicyName</a>'
- '</td>'
- '<td style="style_td;style_td.right;">PolicyCaption</td>'
- '</tr></root>')
- self.setUp()
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'type': 'group',
- }
- self.writer._indent_level = 2
- self.writer._AddPolicyRow(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><tr style="style_tr;">'
- '<td colspan="2" style="style_td;style_td.left;padding-left: 35px;">'
- '<a href="#PolicyName">PolicyCaption</a>'
- '</td>'
- '</tr></root>')
-
- def testAddPolicySection(self):
- # Test if policy details are correctly added to the document.
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'desc': 'PolicyDesc',
- 'type': 'string',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['win', 'mac'],
- 'since_version': '7',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': 'False'
- }
- self.writer.messages['doc_since_version'] = {'text': '..$6..'}
- self.writer._AddPolicySection(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<div style="margin-left: 0px">'
- '<h3><a name="PolicyName"/>PolicyName</h3>'
- '<span>PolicyCaption</span>'
- '<dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>String [Windows:REG_SZ]</dd>'
- '<dt style="style_dt;">_test_win_reg_loc</dt>'
- '<dd style="style_.monospace;">MockKey\\PolicyName</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">PolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Windows, Mac) ..7..</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt>'
- '<dd><p>PolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>&quot;False&quot;</dd>'
- '</dl>'
- '<a href="#top">_test_back_to_top</a>'
- '</div>'
- '</root>')
- # Test for groups.
- self.setUp()
- policy['type'] = 'group'
- self.writer._AddPolicySection(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<div style="margin-left: 0px">'
- '<h2><a name="PolicyName"/>PolicyCaption</h2>'
- '<div style="style_div.group_desc;">PolicyDesc</div>'
- '<a href="#top">_test_back_to_top</a>'
- '</div>'
- '</root>')
-
- def testAddPolicySectionForWindowsOnly(self):
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'desc': 'PolicyDesc',
- 'type': 'int',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['win'],
- 'since_version': '33',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': 123
- }
- self.writer.messages['doc_since_version'] = {'text': '..$6..'}
- self.writer._AddPolicySection(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<div style="margin-left: 0px">'
- '<h3><a name="PolicyName"/>PolicyName</h3>'
- '<span>PolicyCaption</span>'
- '<dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Integer [Windows:REG_DWORD]</dd>'
- '<dt style="style_dt;">_test_win_reg_loc</dt>'
- '<dd style="style_.monospace;">MockKey\\PolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Windows) ..33..</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt>'
- '<dd><p>PolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>0x0000007b (Windows)</dd>'
- '</dl>'
- '<a href="#top">_test_back_to_top</a>'
- '</div>'
- '</root>')
-
- def testAddPolicySectionForMacOnly(self):
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'desc': 'PolicyDesc',
- 'type': 'int',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['mac'],
- 'since_version': '33',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': 123
- }
- self.writer.messages['doc_since_version'] = {'text': '..$6..'}
- self.writer._AddPolicySection(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<div style="margin-left: 0px">'
- '<h3><a name="PolicyName"/>PolicyName</h3>'
- '<span>PolicyCaption</span>'
- '<dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Integer</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">PolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Mac) ..33..</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt>'
- '<dd><p>PolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>123 (Mac)</dd>'
- '</dl>'
- '<a href="#top">_test_back_to_top</a>'
- '</div>'
- '</root>')
-
- def testAddPolicySectionForLinuxOnly(self):
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'desc': 'PolicyDesc',
- 'type': 'int',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['linux'],
- 'since_version': '33',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': 123
- }
- self.writer.messages['doc_since_version'] = {'text': '..$6..'}
- self.writer._AddPolicySection(self.doc_root, policy)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<div style="margin-left: 0px">'
- '<h3><a name="PolicyName"/>PolicyName</h3>'
- '<span>PolicyCaption</span>'
- '<dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Integer</dd>'
- '<dt style="style_dt;">_test_mac_linux_pref_name</dt>'
- '<dd style="style_.monospace;">PolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Linux) ..33..</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt>'
- '<dd><p>PolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>123 (Linux)</dd>'
- '</dl>'
- '<a href="#top">_test_back_to_top</a>'
- '</div>'
- '</root>')
-
- def testAddPolicySectionForAndroidOnly(self):
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'desc': 'PolicyDesc',
- 'type': 'int',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['android'],
- 'since_version': '33',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': 123
- }
- self.writer.messages['doc_since_version'] = {'text': '..$6..'}
- self.writer._AddPolicySection(self.doc_root, policy)
- self.assertTrue(self.writer.IsPolicySupportedOnPlatform(policy, 'android'))
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<div style="margin-left: 0px">'
- '<h3><a name="PolicyName"/>PolicyName</h3>'
- '<span>PolicyCaption</span>'
- '<dl>'
- '<dt style="style_dt;">_test_data_type</dt>'
- '<dd>Integer</dd>'
- '<dt style="style_dt;">_test_android_restriction_name</dt>'
- '<dd style="style_.monospace;">PolicyName</dd>'
- '<dt style="style_dt;">_test_supported_on</dt>'
- '<dd>'
- '<ul style="style_ul;">'
- '<li>Chrome (Android) ..33..</li>'
- '</ul>'
- '</dd>'
- '<dt style="style_dt;">_test_supported_features</dt>'
- '<dd>_test_feature_dynamic_refresh: _test_not_supported</dd>'
- '<dt style="style_dt;">_test_description</dt>'
- '<dd><p>PolicyDesc</p></dd>'
- '<dt style="style_dt;">_test_example_value</dt>'
- '<dd>123 (Android)</dd>'
- '</dl>'
- '<a href="#top">_test_back_to_top</a>'
- '</div>'
- '</root>')
-
- def testAddDictionaryExample(self):
- policy = {
- 'name': 'PolicyName',
- 'caption': 'PolicyCaption',
- 'desc': 'PolicyDesc',
- 'type': 'dict',
- 'supported_on': [{
- 'product': 'chrome',
- 'platforms': ['win', 'mac', 'linux'],
- 'since_version': '7',
- 'until_version': '',
- }],
- 'features': {'dynamic_refresh': False},
- 'example_value': {
- "ProxyMode": "direct",
- "List": ["1", "2", "3"],
- "True": True,
- "False": False,
- "Integer": 123,
- "DictList": [ {
- "A": 1,
- "B": 2,
- }, {
- "C": 3,
- "D": 4,
- },
- ],
- },
- }
- self.writer._AddDictionaryExample(self.doc_root, policy)
- value = json.dumps(policy['example_value']).replace('"', '&quot;')
- self.assertEquals(
- self.doc_root.toxml(),
- '<root>'
- '<dl style="style_dd dl;">'
- '<dt>Windows:</dt>'
- '<dd style="style_.monospace;style_.pre;">MockKey\PolicyName = '
- + value +
- '</dd>'
- '<dt>Android/Linux:</dt>'
- '<dd style="style_.monospace;">PolicyName: ' + value + '</dd>'
- '<dt>Mac:</dt>'
- '<dd style="style_.monospace;style_.pre;">'
- '&lt;key&gt;PolicyName&lt;/key&gt;\n'
- '&lt;dict&gt;\n'
- ' &lt;key&gt;DictList&lt;/key&gt;\n'
- ' &lt;array&gt;\n'
- ' &lt;dict&gt;\n'
- ' &lt;key&gt;A&lt;/key&gt;\n'
- ' &lt;integer&gt;1&lt;/integer&gt;\n'
- ' &lt;key&gt;B&lt;/key&gt;\n'
- ' &lt;integer&gt;2&lt;/integer&gt;\n'
- ' &lt;/dict&gt;\n'
- ' &lt;dict&gt;\n'
- ' &lt;key&gt;C&lt;/key&gt;\n'
- ' &lt;integer&gt;3&lt;/integer&gt;\n'
- ' &lt;key&gt;D&lt;/key&gt;\n'
- ' &lt;integer&gt;4&lt;/integer&gt;\n'
- ' &lt;/dict&gt;\n'
- ' &lt;/array&gt;\n'
- ' &lt;key&gt;False&lt;/key&gt;\n'
- ' &lt;false/&gt;\n'
- ' &lt;key&gt;Integer&lt;/key&gt;\n'
- ' &lt;integer&gt;123&lt;/integer&gt;\n'
- ' &lt;key&gt;List&lt;/key&gt;\n'
- ' &lt;array&gt;\n'
- ' &lt;string&gt;1&lt;/string&gt;\n'
- ' &lt;string&gt;2&lt;/string&gt;\n'
- ' &lt;string&gt;3&lt;/string&gt;\n'
- ' &lt;/array&gt;\n'
- ' &lt;key&gt;ProxyMode&lt;/key&gt;\n'
- ' &lt;string&gt;direct&lt;/string&gt;\n'
- ' &lt;key&gt;True&lt;/key&gt;\n'
- ' &lt;true/&gt;\n'
- '&lt;/dict&gt;'
- '</dd>'
- '</dl>'
- '</root>')
-
- def testParagraphs(self):
- text = 'Paragraph 1\n\nParagraph 2\n\nParagraph 3'
- self.writer._AddParagraphs(self.doc_root, text)
- self.assertEquals(
- self.doc_root.toxml(),
- '<root><p>Paragraph 1</p><p>Paragraph 2</p><p>Paragraph 3</p></root>')
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/json_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/json_writer.py
deleted file mode 100755
index 4dfd282064d..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/json_writer.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-
-from textwrap import TextWrapper
-from grit.format.policy_templates.writers import template_writer
-
-
-TEMPLATE_HEADER="""\
-// Policy template for Linux.
-// Uncomment the policies you wish to activate and change their values to
-// something useful for your case. The provided values are for reference only
-// and do not provide meaningful defaults!
-{"""
-
-
-HEADER_DELIMETER="""\
- //-------------------------------------------------------------------------"""
-
-
-def GetWriter(config):
- '''Factory method for creating JsonWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return JsonWriter(['linux'], config)
-
-
-class JsonWriter(template_writer.TemplateWriter):
- '''Class for generating policy files in JSON format (for Linux). The
- generated files will define all the supported policies with example values
- set for them. This class is used by PolicyTemplateGenerator to write .json
- files.
- '''
-
- def PreprocessPolicies(self, policy_list):
- return self.FlattenGroupsAndSortPolicies(policy_list)
-
- def WriteComment(self, comment):
- self._out.append('// ' + comment)
-
- def WritePolicy(self, policy):
- if policy['type'] == 'external':
- # This type can only be set through cloud policy.
- return
- example_value_str = json.dumps(policy['example_value'], sort_keys=True)
-
- # Add comma to the end of the previous line.
- if not self._first_written:
- self._out[-2] += ','
-
- if not self.CanBeMandatory(policy) and self.CanBeRecommended(policy):
- line = ' // Note: this policy is supported only in recommended mode.'
- self._out.append(line)
- line = ' // The JSON file should be placed in %srecommended.' % \
- self.config['linux_policy_path']
- self._out.append(line)
-
- line = ' // %s' % policy['caption']
- self._out.append(line)
- self._out.append(HEADER_DELIMETER)
- description = self._text_wrapper.wrap(policy['desc'])
- self._out += description;
- line = ' //"%s": %s' % (policy['name'], example_value_str)
- self._out.append('')
- self._out.append(line)
- self._out.append('')
-
- self._first_written = False
-
- def BeginTemplate(self):
- if self._GetChromiumVersionString() is not None:
- self.WriteComment(self.config['build'] + ''' version: ''' + \
- self._GetChromiumVersionString())
- self._out.append(TEMPLATE_HEADER)
-
- def EndTemplate(self):
- self._out.append('}')
-
- def Init(self):
- self._out = []
- # The following boolean member is true until the first policy is written.
- self._first_written = True
- # Create the TextWrapper object once.
- self._text_wrapper = TextWrapper(
- initial_indent = ' // ',
- subsequent_indent = ' // ',
- break_long_words = False,
- width = 80)
-
- def GetTemplateText(self):
- return '\n'.join(self._out)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/json_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/json_writer_unittest.py
deleted file mode 100755
index 8f3c74599ff..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/json_writer_unittest.py
+++ /dev/null
@@ -1,429 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.json_writer'''
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-
-from grit.format.policy_templates.writers import writer_unittest_common
-
-
-TEMPLATE_HEADER="""\
-// Policy template for Linux.
-// Uncomment the policies you wish to activate and change their values to
-// something useful for your case. The provided values are for reference only
-// and do not provide meaningful defaults!
-{
-"""
-
-TEMPLATE_HEADER_WITH_VERSION="""\
-// chromium version: 39.0.0.0
-// Policy template for Linux.
-// Uncomment the policies you wish to activate and change their values to
-// something useful for your case. The provided values are for reference only
-// and do not provide meaningful defaults!
-{
-"""
-
-
-HEADER_DELIMETER="""\
- //-------------------------------------------------------------------------
-"""
-
-
-class JsonWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests for JsonWriter.'''
-
- def CompareOutputs(self, output, expected_output):
- '''Compares the output of the json_writer with its expected output.
-
- Args:
- output: The output of the json writer as returned by grit.
- expected_output: The expected output.
-
- Raises:
- AssertionError: if the two strings are not equivalent.
- '''
- self.assertEquals(
- output.strip(),
- expected_output.strip())
-
- def testEmpty(self):
- # Test the handling of an empty policy list.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": [],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium': '1'}, 'json', 'en')
- expected_output = TEMPLATE_HEADER + '}'
- self.CompareOutputs(output, expected_output)
-
- def testEmptyWithVersion(self):
- # Test the handling of an empty policy list.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": [],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(
- grd, 'fr', {'_chromium': '1', 'version':'39.0.0.0'}, 'json', 'en')
- expected_output = TEMPLATE_HEADER_WITH_VERSION + '}'
- self.CompareOutputs(output, expected_output)
-
- def testMainPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "MainPolicy",'
- ' "type": "main",'
- ' "caption": "Example Main Policy",'
- ' "desc": "Example Main Policy",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": True'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example Main Policy\n' +
- HEADER_DELIMETER +
- ' // Example Main Policy\n\n'
- ' //"MainPolicy": true\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testRecommendedOnlyPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "MainPolicy",'
- ' "type": "main",'
- ' "caption": "Example Main Policy",'
- ' "desc": "Example Main Policy",'
- ' "features": {'
- ' "can_be_recommended": True,'
- ' "can_be_mandatory": False'
- ' },'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": True'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Note: this policy is supported only in recommended mode.\n' +
- ' // The JSON file should be placed in' +
- ' /etc/opt/chrome/policies/recommended.\n' +
- ' // Example Main Policy\n' +
- HEADER_DELIMETER +
- ' // Example Main Policy\n\n'
- ' //"MainPolicy": true\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testStringPolicy(self):
- # Tests a policy group with a single policy of type 'string'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "StringPolicy",'
- ' "type": "string",'
- ' "caption": "Example String Policy",'
- ' "desc": "Example String Policy",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": "hello, world!"'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example String Policy\n' +
- HEADER_DELIMETER +
- ' // Example String Policy\n\n'
- ' //"StringPolicy": "hello, world!"\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testIntPolicy(self):
- # Tests a policy group with a single policy of type 'string'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "IntPolicy",'
- ' "type": "int",'
- ' "caption": "Example Int Policy",'
- ' "desc": "Example Int Policy",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": 15'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example Int Policy\n' +
- HEADER_DELIMETER +
- ' // Example Int Policy\n\n'
- ' //"IntPolicy": 15\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testIntEnumPolicy(self):
- # Tests a policy group with a single policy of type 'int-enum'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "EnumPolicy",'
- ' "type": "int-enum",'
- ' "caption": "Example Int Enum",'
- ' "desc": "Example Int Enum",'
- ' "items": ['
- ' {"name": "ProxyServerDisabled", "value": 0, "caption": ""},'
- ' {"name": "ProxyServerAutoDetect", "value": 1, "caption": ""},'
- ' ],'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": 1'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example Int Enum\n' +
- HEADER_DELIMETER +
- ' // Example Int Enum\n\n'
- ' //"EnumPolicy": 1\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testStringEnumPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "EnumPolicy",'
- ' "type": "string-enum",'
- ' "caption": "Example String Enum",'
- ' "desc": "Example String Enum",'
- ' "items": ['
- ' {"name": "ProxyServerDisabled", "value": "one",'
- ' "caption": ""},'
- ' {"name": "ProxyServerAutoDetect", "value": "two",'
- ' "caption": ""},'
- ' ],'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": "one"'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example String Enum\n' +
- HEADER_DELIMETER +
- ' // Example String Enum\n\n'
- ' //"EnumPolicy": "one"\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testListPolicy(self):
- # Tests a policy group with a single policy of type 'list'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "ListPolicy",'
- ' "type": "list",'
- ' "caption": "Example List",'
- ' "desc": "Example List",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": ["foo", "bar"]'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example List\n' +
- HEADER_DELIMETER +
- ' // Example List\n\n'
- ' //"ListPolicy": ["foo", "bar"]\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testStringEnumListPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum-list'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "ListPolicy",'
- ' "type": "string-enum-list",'
- ' "caption": "Example List",'
- ' "desc": "Example List",'
- ' "items": ['
- ' {"name": "ProxyServerDisabled", "value": "one",'
- ' "caption": ""},'
- ' {"name": "ProxyServerAutoDetect", "value": "two",'
- ' "caption": ""},'
- ' ],'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": ["one", "two"]'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example List\n' +
- HEADER_DELIMETER +
- ' // Example List\n\n'
- ' //"ListPolicy": ["one", "two"]\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testDictionaryPolicy(self):
- # Tests a policy group with a single policy of type 'dict'.
- example = {
- 'bool': True,
- 'dict': {
- 'a': 1,
- 'b': 2,
- },
- 'int': 10,
- 'list': [1, 2, 3],
- 'string': 'abc',
- }
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "DictionaryPolicy",'
- ' "type": "dict",'
- ' "caption": "Example Dictionary Policy",'
- ' "desc": "Example Dictionary Policy",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": ' + str(example) +
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Example Dictionary Policy\n' +
- HEADER_DELIMETER +
- ' // Example Dictionary Policy\n\n'
- ' //"DictionaryPolicy": {"bool": true, "dict": {"a": 1, '
- '"b": 2}, "int": 10, "list": [1, 2, 3], "string": "abc"}\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
- def testNonSupportedPolicy(self):
- # Tests a policy that is not supported on Linux, so it shouldn't
- # be included in the JSON file.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "NonLinuxPolicy",'
- ' "type": "list",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.mac:8-"],'
- ' "example_value": ["a"]'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = TEMPLATE_HEADER + '}'
- self.CompareOutputs(output, expected_output)
-
- def testPolicyGroup(self):
- # Tests a policy group that has more than one policies.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "Group1",'
- ' "type": "group",'
- ' "caption": "",'
- ' "desc": "",'
- ' "policies": [{'
- ' "name": "Policy1",'
- ' "type": "list",'
- ' "caption": "Policy One",'
- ' "desc": "Policy One",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": ["a", "b"]'
- ' },{'
- ' "name": "Policy2",'
- ' "type": "string",'
- ' "caption": "Policy Two",'
- ' "desc": "Policy Two",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": "c"'
- ' }],'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'json', 'en')
- expected_output = (
- TEMPLATE_HEADER +
- ' // Policy One\n' +
- HEADER_DELIMETER +
- ' // Policy One\n\n'
- ' //"Policy1": ["a", "b"],\n\n'
- ' // Policy Two\n' +
- HEADER_DELIMETER +
- ' // Policy Two\n\n'
- ' //"Policy2": "c"\n\n'
- '}')
- self.CompareOutputs(output, expected_output)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/mock_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/mock_writer.py
deleted file mode 100755
index 3db3a5432b9..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/mock_writer.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from template_writer import TemplateWriter
-
-
-class MockWriter(TemplateWriter):
- '''Helper class for unit tests in policy_template_generator_unittest.py
- '''
-
- def __init__(self):
- pass
-
- def WritePolicy(self, policy):
- pass
-
- def BeginTemplate(self):
- pass
-
- def GetTemplateText(self):
- pass
-
- def IsPolicySupported(self, policy):
- return True
-
- def Test(self):
- pass
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/plist_helper.py b/chromium/tools/grit/grit/format/policy_templates/writers/plist_helper.py
deleted file mode 100755
index 0c599ca7a85..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/plist_helper.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-'''Common functions for plist_writer and plist_strings_writer.
-'''
-
-
-def GetPlistFriendlyName(name):
- '''Transforms a string so that it will be suitable for use as
- a pfm_name in the plist manifest file.
- '''
- return name.replace(' ', '_')
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer.py
deleted file mode 100755
index 4257bf8e945..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from grit.format.policy_templates.writers import plist_helper
-from grit.format.policy_templates.writers import template_writer
-
-
-def GetWriter(config):
- '''Factory method for creating PListStringsWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return PListStringsWriter(['mac'], config)
-
-
-class PListStringsWriter(template_writer.TemplateWriter):
- '''Outputs localized string table files for the Mac policy file.
- These files are named Localizable.strings and they are in the
- [lang].lproj subdirectories of the manifest bundle.
- '''
-
- def WriteComment(self, comment):
- self._out.append('/* ' + comment + ' */' )
-
- def _AddToStringTable(self, item_name, caption, desc):
- '''Add a title and a description of an item to the string table.
-
- Args:
- item_name: The name of the item that will get the title and the
- description.
- title: The text of the title to add.
- desc: The text of the description to add.
- '''
- caption = caption.replace('"', '\\"')
- caption = caption.replace('\n', '\\n')
- desc = desc.replace('"', '\\"')
- desc = desc.replace('\n', '\\n')
- self._out.append('%s.pfm_title = \"%s\";' % (item_name, caption))
- self._out.append('%s.pfm_description = \"%s\";' % (item_name, desc))
-
- def PreprocessPolicies(self, policy_list):
- return self.FlattenGroupsAndSortPolicies(policy_list)
-
- def WritePolicy(self, policy):
- '''Add strings to the stringtable corresponding a given policy.
-
- Args:
- policy: The policy for which the strings will be added to the
- string table.
- '''
- desc = policy['desc']
- if policy['type'] == 'external':
- # This type can only be set through cloud policy.
- return
- elif policy['type'] in ('int-enum','string-enum', 'string-enum-list'):
- # Append the captions of enum items to the description string.
- item_descs = []
- for item in policy['items']:
- item_descs.append(str(item['value']) + ' - ' + item['caption'])
- desc = '\n'.join(item_descs) + '\n' + desc
-
- self._AddToStringTable(policy['name'], policy['label'], desc)
-
- def BeginTemplate(self):
- app_name = plist_helper.GetPlistFriendlyName(self.config['app_name'])
- if self._GetChromiumVersionString() is not None:
- self.WriteComment(self.config['build'] + ''' version: ''' + \
- self._GetChromiumVersionString())
- self._AddToStringTable(
- app_name,
- self.config['app_name'],
- self.messages['mac_chrome_preferences']['text'])
-
- def Init(self):
- # A buffer for the lines of the string table being generated.
- self._out = []
-
- def GetTemplateText(self):
- return '\n'.join(self._out)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer_unittest.py
deleted file mode 100755
index efad6f223fe..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/plist_strings_writer_unittest.py
+++ /dev/null
@@ -1,411 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.plist_strings_writer'''
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-
-from grit.format.policy_templates.writers import writer_unittest_common
-
-
-class PListStringsWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests for PListStringsWriter.'''
-
- def testEmpty(self):
- # Test PListStringsWriter in case of empty polices.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': '$1 preferen"ces',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium': '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Chromium.pfm_title = "Chromium";\n'
- 'Chromium.pfm_description = "Chromium preferen\\"ces";')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testEmptyVersion(self):
- # Test PListStringsWriter in case of empty polices.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': '$1 preferen"ces',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium': '1',
- 'mac_bundle_id': 'com.example.Test',
- 'version': '39.0.0.0'},
- 'plist_strings',
- 'en')
- expected_output = (
- '/* chromium version: 39.0.0.0 */\n'
- 'Chromium.pfm_title = "Chromium";\n'
- 'Chromium.pfm_description = "Chromium preferen\\"ces";')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testMainPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainGroup',
- 'type': 'group',
- 'caption': 'Caption of main.',
- 'desc': 'Description of main.',
- 'policies': [{
- 'name': 'MainPolicy',
- 'type': 'main',
- 'supported_on': ['chrome.mac:8-'],
- 'caption': 'Caption of main policy.',
- 'desc': 'Description of main policy.',
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': 'Preferences of $1',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Google_Chrome.pfm_title = "Google Chrome";\n'
- 'Google_Chrome.pfm_description = "Preferences of Google Chrome";\n'
- 'MainPolicy.pfm_title = "Caption of main policy.";\n'
- 'MainPolicy.pfm_description = "Description of main policy.";')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringPolicy(self):
- # Tests a policy group with a single policy of type 'string'. Also test
- # inheriting group description to policy description.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'StringGroup',
- 'type': 'group',
- 'caption': 'Caption of group.',
- 'desc': """Description of group.
-With a newline.""",
- 'policies': [{
- 'name': 'StringPolicy',
- 'type': 'string',
- 'caption': 'Caption of policy.',
- 'desc': """Description of policy.
-With a newline.""",
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': 'Preferences of $1',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Chromium.pfm_title = "Chromium";\n'
- 'Chromium.pfm_description = "Preferences of Chromium";\n'
- 'StringPolicy.pfm_title = "Caption of policy.";\n'
- 'StringPolicy.pfm_description = '
- '"Description of policy.\\nWith a newline.";')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringListPolicy(self):
- # Tests a policy group with a single policy of type 'list'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'ListGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'ListPolicy',
- 'type': 'list',
- 'caption': 'Caption of policy.',
- 'desc': """Description of policy.
-With a newline.""",
- 'schema': {
- 'type': 'array',
- 'items': { 'type': 'string' },
- },
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': 'Preferences of $1',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Chromium.pfm_title = "Chromium";\n'
- 'Chromium.pfm_description = "Preferences of Chromium";\n'
- 'ListPolicy.pfm_title = "Caption of policy.";\n'
- 'ListPolicy.pfm_description = '
- '"Description of policy.\\nWith a newline.";')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringEnumListPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum-list'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'EnumPolicy',
- 'type': 'string-enum-list',
- 'caption': 'Caption of policy.',
- 'desc': """Description of policy.
-With a newline.""",
- 'schema': {
- 'type': 'array',
- 'items': { 'type': 'string' },
- },
- 'items': [
- {
- 'name': 'ProxyServerDisabled',
- 'value': 'one',
- 'caption': 'Option1'
- },
- {
- 'name': 'ProxyServerAutoDetect',
- 'value': 'two',
- 'caption': 'Option2'
- },
- ],
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': 'Preferences of $1',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Chromium.pfm_title = "Chromium";\n'
- 'Chromium.pfm_description = "Preferences of Chromium";\n'
- 'EnumPolicy.pfm_title = "Caption of policy.";\n'
- 'EnumPolicy.pfm_description = '
- '"one - Option1\\ntwo - Option2\\n'
- 'Description of policy.\\nWith a newline.";')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testIntEnumPolicy(self):
- # Tests a policy group with a single policy of type 'int-enum'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumGroup',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [{
- 'name': 'EnumPolicy',
- 'type': 'int-enum',
- 'desc': 'Description of policy.',
- 'caption': 'Caption of policy.',
- 'items': [
- {
- 'name': 'ProxyServerDisabled',
- 'value': 0,
- 'caption': 'Option1'
- },
- {
- 'name': 'ProxyServerAutoDetect',
- 'value': 1,
- 'caption': 'Option2'
- },
- ],
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': '$1 preferences',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Google_Chrome.pfm_title = "Google Chrome";\n'
- 'Google_Chrome.pfm_description = "Google Chrome preferences";\n'
- 'EnumPolicy.pfm_title = "Caption of policy.";\n'
- 'EnumPolicy.pfm_description = '
- '"0 - Option1\\n1 - Option2\\nDescription of policy.";\n')
-
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringEnumPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumGroup',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [{
- 'name': 'EnumPolicy',
- 'type': 'string-enum',
- 'desc': 'Description of policy.',
- 'caption': 'Caption of policy.',
- 'items': [
- {
- 'name': 'ProxyServerDisabled',
- 'value': 'one',
- 'caption': 'Option1'
- },
- {
- 'name': 'ProxyServerAutoDetect',
- 'value': 'two',
- 'caption': 'Option2'
- },
- ],
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': '$1 preferences',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Google_Chrome.pfm_title = "Google Chrome";\n'
- 'Google_Chrome.pfm_description = "Google Chrome preferences";\n'
- 'EnumPolicy.pfm_title = "Caption of policy.";\n'
- 'EnumPolicy.pfm_description = '
- '"one - Option1\\ntwo - Option2\\nDescription of policy.";\n')
-
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testNonSupportedPolicy(self):
- # Tests a policy that is not supported on Mac, so its strings shouldn't
- # be included in the plist string table.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'NonMacGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'NonMacPolicy',
- 'type': 'string',
- 'caption': '',
- 'desc': '',
- 'supported_on': ['chrome_os:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {
- 'mac_chrome_preferences': {
- 'text': '$1 preferences',
- 'desc': 'blah'
- }
- }
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
- 'plist_strings',
- 'en')
- expected_output = (
- 'Google_Chrome.pfm_title = "Google Chrome";\n'
- 'Google_Chrome.pfm_description = "Google Chrome preferences";')
- self.assertEquals(output.strip(), expected_output.strip())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/plist_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/plist_writer.py
deleted file mode 100755
index 52bd9814f12..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/plist_writer.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from xml.dom import minidom
-from grit.format.policy_templates.writers import plist_helper
-from grit.format.policy_templates.writers import xml_formatted_writer
-
-
-# This writer outputs a Preferences Manifest file as documented at
-# https://developer.apple.com/library/mac/documentation/MacOSXServer/Conceptual/Preference_Manifest_Files
-
-
-def GetWriter(config):
- '''Factory method for creating PListWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return PListWriter(['mac'], config)
-
-
-class PListWriter(xml_formatted_writer.XMLFormattedWriter):
- '''Class for generating policy templates in Mac plist format.
- It is used by PolicyTemplateGenerator to write plist files.
- '''
-
- STRING_TABLE = 'Localizable.strings'
- TYPE_TO_INPUT = {
- 'string': 'string',
- 'int': 'integer',
- 'int-enum': 'integer',
- 'string-enum': 'string',
- 'string-enum-list': 'array',
- 'main': 'boolean',
- 'list': 'array',
- 'dict': 'dictionary',
- }
-
- def _AddKeyValuePair(self, parent, key_string, value_tag):
- '''Adds a plist key-value pair to a parent XML element.
-
- A key-value pair in plist consists of two XML elements next two each other:
- <key>key_string</key>
- <value_tag>...</value_tag>
-
- Args:
- key_string: The content of the key tag.
- value_tag: The name of the value element.
-
- Returns:
- The XML element of the value tag.
- '''
- self.AddElement(parent, 'key', {}, key_string)
- return self.AddElement(parent, value_tag)
-
- def _AddStringKeyValuePair(self, parent, key_string, value_string):
- '''Adds a plist key-value pair to a parent XML element, where the
- value element contains a string. The name of the value element will be
- <string>.
-
- Args:
- key_string: The content of the key tag.
- value_string: The content of the value tag.
- '''
- self.AddElement(parent, 'key', {}, key_string)
- self.AddElement(parent, 'string', {}, value_string)
-
- def _AddRealKeyValuePair(self, parent, key_string, value_string):
- '''Adds a plist key-value pair to a parent XML element, where the
- value element contains a real number. The name of the value element will be
- <real>.
-
- Args:
- key_string: The content of the key tag.
- value_string: The content of the value tag.
- '''
- self.AddElement(parent, 'key', {}, key_string)
- self.AddElement(parent, 'real', {}, value_string)
-
- def _AddTargets(self, parent, policy):
- '''Adds the following XML snippet to an XML element:
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
-
- Args:
- parent: The parent XML element where the snippet will be added.
- '''
- array = self._AddKeyValuePair(parent, 'pfm_targets', 'array')
- if self.CanBeRecommended(policy):
- self.AddElement(array, 'string', {}, 'user')
- if self.CanBeMandatory(policy):
- self.AddElement(array, 'string', {}, 'user-managed')
-
- def PreprocessPolicies(self, policy_list):
- return self.FlattenGroupsAndSortPolicies(policy_list)
-
- def WritePolicy(self, policy):
- policy_name = policy['name']
- policy_type = policy['type']
- if policy_type == 'external':
- # This type can only be set through cloud policy.
- return
-
- dict = self.AddElement(self._array, 'dict')
- self._AddStringKeyValuePair(dict, 'pfm_name', policy_name)
- # Set empty strings for title and description. They will be taken by the
- # OSX Workgroup Manager from the string table in a Localizable.strings file.
- # Those files are generated by plist_strings_writer.
- self._AddStringKeyValuePair(dict, 'pfm_description', '')
- self._AddStringKeyValuePair(dict, 'pfm_title', '')
- self._AddTargets(dict, policy)
- self._AddStringKeyValuePair(dict, 'pfm_type',
- self.TYPE_TO_INPUT[policy_type])
- if policy_type in ('int-enum', 'string-enum'):
- range_list = self._AddKeyValuePair(dict, 'pfm_range_list', 'array')
- for item in policy['items']:
- if policy_type == 'int-enum':
- element_type = 'integer'
- else:
- element_type = 'string'
- self.AddElement(range_list, element_type, {}, str(item['value']))
- elif policy_type in ('list', 'string-enum-list'):
- subkeys = self._AddKeyValuePair(dict, 'pfm_subkeys', 'array')
- subkeys_dict = self.AddElement(subkeys, 'dict')
- subkeys_type = self._AddKeyValuePair(subkeys_dict, 'pfm_type', 'string')
- self.AddText(subkeys_type, 'string')
-
- def BeginTemplate(self):
- self._plist.attributes['version'] = '1'
- dict = self.AddElement(self._plist, 'dict')
- if self._GetChromiumVersionString() is not None:
- self.AddComment(self._plist, self.config['build'] + ' version: ' + \
- self._GetChromiumVersionString())
- app_name = plist_helper.GetPlistFriendlyName(self.config['app_name'])
- self._AddStringKeyValuePair(dict, 'pfm_name', app_name)
- self._AddStringKeyValuePair(dict, 'pfm_description', '')
- self._AddStringKeyValuePair(dict, 'pfm_title', '')
- self._AddRealKeyValuePair(dict, 'pfm_version', '1')
- self._AddStringKeyValuePair(dict, 'pfm_domain',
- self.config['mac_bundle_id'])
-
- self._array = self._AddKeyValuePair(dict, 'pfm_subkeys', 'array')
-
- def CreatePlistDocument(self):
- dom_impl = minidom.getDOMImplementation('')
- doctype = dom_impl.createDocumentType(
- 'plist',
- '-//Apple//DTD PLIST 1.0//EN',
- 'http://www.apple.com/DTDs/PropertyList-1.0.dtd')
- return dom_impl.createDocument(None, 'plist', doctype)
-
- def Init(self):
- self._doc = self.CreatePlistDocument()
- self._plist = self._doc.documentElement
-
- def GetTemplateText(self):
- return self.ToPrettyXml(self._doc)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/plist_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/plist_writer_unittest.py
deleted file mode 100755
index fd1b81f56cf..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/plist_writer_unittest.py
+++ /dev/null
@@ -1,691 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.plist_writer'''
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-
-from grit.format.policy_templates.writers import writer_unittest_common
-
-
-class PListWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests for PListWriter.'''
-
- def _GetExpectedOutputs(self, product_name, bundle_id, policies):
- '''Substitutes the variable parts into a plist template. The result
- of this function can be used as an expected result to test the output
- of PListWriter.
-
- Args:
- product_name: The name of the product, normally Chromium or Google Chrome.
- bundle_id: The mac bundle id of the product.
- policies: The list of policies.
-
- Returns:
- The text of a plist template with the variable parts substituted.
- '''
- return '''
-<?xml version="1.0" ?>
-<!DOCTYPE plist PUBLIC '-//Apple//DTD PLIST 1.0//EN' 'http://www.apple.com/DTDs/PropertyList-1.0.dtd'>
-<plist version="1">
- <dict>
- <key>pfm_name</key>
- <string>%s</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_version</key>
- <real>1</real>
- <key>pfm_domain</key>
- <string>%s</string>
- <key>pfm_subkeys</key>
- %s
- </dict>
-</plist>''' % (product_name, bundle_id, policies)
-
- def _GetExpectedOutputsWithVersion(self, product_name, bundle_id, policies,
- version):
- '''Substitutes the variable parts into a plist template. The result
- of this function can be used as an expected result to test the output
- of PListWriter.
-
- Args:
- product_name: The name of the product, normally Chromium or Google Chrome.
- bundle_id: The mac bundle id of the product.
- policies: The list of policies.
-
- Returns:
- The text of a plist template with the variable parts substituted.
- '''
- return '''
-<?xml version="1.0" ?>
-<!DOCTYPE plist PUBLIC '-//Apple//DTD PLIST 1.0//EN' 'http://www.apple.com/DTDs/PropertyList-1.0.dtd'>
-<plist version="1">
- <dict>
- <key>pfm_name</key>
- <string>%s</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_version</key>
- <real>1</real>
- <key>pfm_domain</key>
- <string>%s</string>
- <key>pfm_subkeys</key>
- %s
- </dict>
- <!--%s-->
-</plist>''' % (product_name, bundle_id, policies, version)
-
- def testEmpty(self):
- # Test PListWriter in case of empty polices.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [],
- 'placeholders': [],
- 'messages': {},
- }''')
-
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium': '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '<array/>')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testEmptyVersion(self):
- # Test PListWriter in case of empty polices.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [],
- 'placeholders': [],
- 'messages': {},
- }''')
-
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium': '1',
- 'mac_bundle_id': 'com.example.Test',
- 'version': '39.0.0.0'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputsWithVersion(
- 'Chromium',
- 'com.example.Test',
- '<array/>',
- 'chromium version: 39.0.0.0')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testMainPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainGroup',
- 'type': 'group',
- 'policies': [{
- 'name': 'MainPolicy',
- 'type': 'main',
- 'desc': '',
- 'caption': '',
- 'supported_on': ['chrome.mac:8-'],
- }],
- 'desc': '',
- 'caption': '',
- },
- ],
- 'placeholders': [],
- 'messages': {}
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>MainPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>boolean</string>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testRecommendedPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainGroup',
- 'type': 'group',
- 'policies': [{
- 'name': 'MainPolicy',
- 'type': 'main',
- 'desc': '',
- 'caption': '',
- 'features': {
- 'can_be_recommended' : True
- },
- 'supported_on': ['chrome.mac:8-'],
- }],
- 'desc': '',
- 'caption': '',
- },
- ],
- 'placeholders': [],
- 'messages': {}
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>MainPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user</string>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>boolean</string>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testRecommendedOnlyPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'MainGroup',
- 'type': 'group',
- 'policies': [{
- 'name': 'MainPolicy',
- 'type': 'main',
- 'desc': '',
- 'caption': '',
- 'features': {
- 'can_be_recommended' : True,
- 'can_be_mandatory' : False
- },
- 'supported_on': ['chrome.mac:8-'],
- }],
- 'desc': '',
- 'caption': '',
- },
- ],
- 'placeholders': [],
- 'messages': {}
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>MainPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user</string>
- </array>
- <key>pfm_type</key>
- <string>boolean</string>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringPolicy(self):
- # Tests a policy group with a single policy of type 'string'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'StringGroup',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [{
- 'name': 'StringPolicy',
- 'type': 'string',
- 'supported_on': ['chrome.mac:8-'],
- 'desc': '',
- 'caption': '',
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>StringPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>string</string>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testListPolicy(self):
- # Tests a policy group with a single policy of type 'list'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'ListGroup',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [{
- 'name': 'ListPolicy',
- 'type': 'list',
- 'schema': {
- 'type': 'array',
- 'items': { 'type': 'string' },
- },
- 'supported_on': ['chrome.mac:8-'],
- 'desc': '',
- 'caption': '',
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>ListPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>array</string>
- <key>pfm_subkeys</key>
- <array>
- <dict>
- <key>pfm_type</key>
- <string>string</string>
- </dict>
- </array>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringEnumListPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum-list'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'ListGroup',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [{
- 'name': 'ListPolicy',
- 'type': 'string-enum-list',
- 'schema': {
- 'type': 'array',
- 'items': { 'type': 'string' },
- },
- 'items': [
- {'name': 'ProxyServerDisabled', 'value': 'one', 'caption': ''},
- {'name': 'ProxyServerAutoDetect', 'value': 'two', 'caption': ''},
- ],
- 'supported_on': ['chrome.mac:8-'],
- 'supported_on': ['chrome.mac:8-'],
- 'desc': '',
- 'caption': '',
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>ListPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>array</string>
- <key>pfm_subkeys</key>
- <array>
- <dict>
- <key>pfm_type</key>
- <string>string</string>
- </dict>
- </array>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testIntPolicy(self):
- # Tests a policy group with a single policy of type 'int'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'IntGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'IntPolicy',
- 'type': 'int',
- 'caption': '',
- 'desc': '',
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>IntPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>integer</string>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testIntEnumPolicy(self):
- # Tests a policy group with a single policy of type 'int-enum'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'EnumPolicy',
- 'type': 'int-enum',
- 'desc': '',
- 'caption': '',
- 'items': [
- {'name': 'ProxyServerDisabled', 'value': 0, 'caption': ''},
- {'name': 'ProxyServerAutoDetect', 'value': 1, 'caption': ''},
- ],
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Google_Chrome', 'com.example.Test2', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>EnumPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>integer</string>
- <key>pfm_range_list</key>
- <array>
- <integer>0</integer>
- <integer>1</integer>
- </array>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testStringEnumPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'EnumGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'EnumPolicy',
- 'type': 'string-enum',
- 'desc': '',
- 'caption': '',
- 'items': [
- {'name': 'ProxyServerDisabled', 'value': 'one', 'caption': ''},
- {'name': 'ProxyServerAutoDetect', 'value': 'two', 'caption': ''},
- ],
- 'supported_on': ['chrome.mac:8-'],
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Google_Chrome', 'com.example.Test2', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>EnumPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>string</string>
- <key>pfm_range_list</key>
- <array>
- <string>one</string>
- <string>two</string>
- </array>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testDictionaryPolicy(self):
- # Tests a policy group with a single policy of type 'dict'.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'DictionaryGroup',
- 'type': 'group',
- 'desc': '',
- 'caption': '',
- 'policies': [{
- 'name': 'DictionaryPolicy',
- 'type': 'dict',
- 'supported_on': ['chrome.mac:8-'],
- 'desc': '',
- 'caption': '',
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Chromium', 'com.example.Test', '''<array>
- <dict>
- <key>pfm_name</key>
- <string>DictionaryPolicy</string>
- <key>pfm_description</key>
- <string/>
- <key>pfm_title</key>
- <string/>
- <key>pfm_targets</key>
- <array>
- <string>user-managed</string>
- </array>
- <key>pfm_type</key>
- <string>dictionary</string>
- </dict>
- </array>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
- def testNonSupportedPolicy(self):
- # Tests a policy that is not supported on Mac, so it shouldn't
- # be included in the plist file.
- grd = self.PrepareTest('''
- {
- 'policy_definitions': [
- {
- 'name': 'NonMacGroup',
- 'type': 'group',
- 'caption': '',
- 'desc': '',
- 'policies': [{
- 'name': 'NonMacPolicy',
- 'type': 'string',
- 'supported_on': ['chrome.linux:8-', 'chrome.win:7-'],
- 'caption': '',
- 'desc': '',
- }],
- },
- ],
- 'placeholders': [],
- 'messages': {},
- }''')
- output = self.GetOutput(
- grd,
- 'fr',
- {'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
- 'plist',
- 'en')
- expected_output = self._GetExpectedOutputs(
- 'Google_Chrome', 'com.example.Test2', '''<array/>''')
- self.assertEquals(output.strip(), expected_output.strip())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/reg_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/reg_writer.py
deleted file mode 100755
index 70c87a32c6f..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/reg_writer.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-import json
-
-from grit.format.policy_templates.writers import template_writer
-
-
-def GetWriter(config):
- '''Factory method for creating RegWriter objects.
- See the constructor of TemplateWriter for description of
- arguments.
- '''
- return RegWriter(['win'], config)
-
-
-class RegWriter(template_writer.TemplateWriter):
- '''Class for generating policy example files in .reg format (for Windows).
- The generated files will define all the supported policies with example
- values set for them. This class is used by PolicyTemplateGenerator to
- write .reg files.
- '''
-
- NEWLINE = '\r\n'
-
- def _EscapeRegString(self, string):
- return string.replace('\\', '\\\\').replace('\"', '\\\"')
-
- def _StartBlock(self, key, suffix, list):
- key = 'HKEY_LOCAL_MACHINE\\' + key
- if suffix:
- key = key + '\\' + suffix
- if key != self._last_key.get(id(list), None):
- list.append('')
- list.append('[%s]' % key)
- self._last_key[id(list)] = key
-
- def PreprocessPolicies(self, policy_list):
- return self.FlattenGroupsAndSortPolicies(policy_list,
- self.GetPolicySortingKey)
-
- def GetPolicySortingKey(self, policy):
- '''Extracts a sorting key from a policy. These keys can be used for
- list.sort() methods to sort policies.
- See TemplateWriter.SortPoliciesGroupsFirst for usage.
- '''
- is_list = policy['type'] in ('list', 'string-enum-list')
- # Lists come after regular policies.
- return (is_list, policy['name'])
-
- def _WritePolicy(self, policy, key, list):
- example_value = policy['example_value']
-
- if policy['type'] == 'external':
- # This type can only be set through cloud policy.
- return
- elif policy['type'] in ('list', 'string-enum-list'):
- self._StartBlock(key, policy['name'], list)
- i = 1
- for item in example_value:
- escaped_str = self._EscapeRegString(item)
- list.append('"%d"="%s"' % (i, escaped_str))
- i = i + 1
- else:
- self._StartBlock(key, None, list)
- if policy['type'] in ('string', 'string-enum', 'dict'):
- example_value_str = json.dumps(example_value, sort_keys=True)
- if policy['type'] == 'dict':
- example_value_str = '"%s"' % example_value_str
- elif policy['type'] == 'main':
- if example_value == True:
- example_value_str = 'dword:00000001'
- else:
- example_value_str = 'dword:00000000'
- elif policy['type'] in ('int', 'int-enum'):
- example_value_str = 'dword:%08x' % example_value
- else:
- raise Exception('unknown policy type %s:' % policy['type'])
-
- list.append('"%s"=%s' % (policy['name'], example_value_str))
-
- def WriteComment(self, comment):
- self._prefix.append('; ' + comment)
-
- def WritePolicy(self, policy):
- if self.CanBeMandatory(policy):
- self._WritePolicy(policy,
- self.config['win_reg_mandatory_key_name'],
- self._mandatory)
-
- def WriteRecommendedPolicy(self, policy):
- self._WritePolicy(policy,
- self.config['win_reg_recommended_key_name'],
- self._recommended)
-
- def BeginTemplate(self):
- pass
-
- def EndTemplate(self):
- pass
-
- def Init(self):
- self._mandatory = []
- self._recommended = []
- self._last_key = {}
- self._prefix = []
-
- def GetTemplateText(self):
- self._prefix.append('Windows Registry Editor Version 5.00')
- if self._GetChromiumVersionString() is not None:
- self.WriteComment(self.config['build'] + ' version: ' + \
- self._GetChromiumVersionString())
- all = self._prefix + self._mandatory + self._recommended
- return self.NEWLINE.join(all)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/reg_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/reg_writer_unittest.py
deleted file mode 100755
index 2851a8bf69c..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/reg_writer_unittest.py
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-'''Unit tests for grit.format.policy_templates.writers.reg_writer'''
-
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-
-from grit.format.policy_templates.writers import writer_unittest_common
-
-
-class RegWriterUnittest(writer_unittest_common.WriterUnittestCommon):
- '''Unit tests for RegWriter.'''
-
- NEWLINE = '\r\n'
-
- def CompareOutputs(self, output, expected_output):
- '''Compares the output of the reg_writer with its expected output.
-
- Args:
- output: The output of the reg writer as returned by grit.
- expected_output: The expected output.
-
- Raises:
- AssertionError: if the two strings are not equivalent.
- '''
- self.assertEquals(
- output.strip(),
- expected_output.strip())
-
- def testEmpty(self):
- # Test the handling of an empty policy list.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": [],'
- ' "placeholders": [],'
- ' "messages": {}'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium': '1', }, 'reg', 'en')
- expected_output = 'Windows Registry Editor Version 5.00'
- self.CompareOutputs(output, expected_output)
-
- def testEmptyVersion(self):
- # Test the handling of an empty policy list.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": [],'
- ' "placeholders": [],'
- ' "messages": {}'
- '}')
- output = self.GetOutput(
- grd, 'fr', {'_chromium': '1', 'version': '39.0.0.0' }, 'reg', 'en')
- expected_output = ('Windows Registry Editor Version 5.00\r\n'
- '; chromium version: 39.0.0.0\r\n')
- self.CompareOutputs(output, expected_output)
-
- def testMainPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "MainPolicy",'
- ' "type": "main",'
- ' "features": { "can_be_recommended": True },'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": True'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome]',
- '"MainPolicy"=dword:00000001',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome\\Recommended]',
- '"MainPolicy"=dword:00000001'])
- self.CompareOutputs(output, expected_output)
-
- def testRecommendedMainPolicy(self):
- # Tests a policy group with a single policy of type 'main'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "MainPolicy",'
- ' "type": "main",'
- ' "features": {'
- ' "can_be_recommended": True,'
- ' "can_be_mandatory": False '
- ' },'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": True'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome\\Recommended]',
- '"MainPolicy"=dword:00000001'])
- self.CompareOutputs(output, expected_output)
-
- def testStringPolicy(self):
- # Tests a policy group with a single policy of type 'string'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "StringPolicy",'
- ' "type": "string",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": "hello, world! \\\" \\\\"'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
- '"StringPolicy"="hello, world! \\\" \\\\"'])
- self.CompareOutputs(output, expected_output)
-
- def testIntPolicy(self):
- # Tests a policy group with a single policy of type 'int'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "IntPolicy",'
- ' "type": "int",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": 26'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
- '"IntPolicy"=dword:0000001a'])
- self.CompareOutputs(output, expected_output)
-
- def testIntEnumPolicy(self):
- # Tests a policy group with a single policy of type 'int-enum'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "EnumPolicy",'
- ' "type": "int-enum",'
- ' "caption": "",'
- ' "desc": "",'
- ' "items": ['
- ' {"name": "ProxyServerDisabled", "value": 0, "caption": ""},'
- ' {"name": "ProxyServerAutoDetect", "value": 1, "caption": ""},'
- ' ],'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": 1'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome]',
- '"EnumPolicy"=dword:00000001'])
- self.CompareOutputs(output, expected_output)
-
- def testStringEnumPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "EnumPolicy",'
- ' "type": "string-enum",'
- ' "caption": "",'
- ' "desc": "",'
- ' "items": ['
- ' {"name": "ProxyServerDisabled", "value": "one",'
- ' "caption": ""},'
- ' {"name": "ProxyServerAutoDetect", "value": "two",'
- ' "caption": ""},'
- ' ],'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": "two"'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_google_chrome': '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Google\\Chrome]',
- '"EnumPolicy"="two"'])
- self.CompareOutputs(output, expected_output)
-
- def testListPolicy(self):
- # Tests a policy group with a single policy of type 'list'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "ListPolicy",'
- ' "type": "list",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": ["foo", "bar"]'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium\\ListPolicy]',
- '"1"="foo"',
- '"2"="bar"'])
-
- def testStringEnumListPolicy(self):
- # Tests a policy group with a single policy of type 'string-enum-list'.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "ListPolicy",'
- ' "type": "string-enum-list",'
- ' "caption": "",'
- ' "desc": "",'
- ' "items": ['
- ' {"name": "ProxyServerDisabled", "value": "foo",'
- ' "caption": ""},'
- ' {"name": "ProxyServerAutoDetect", "value": "bar",'
- ' "caption": ""},'
- ' ],'
- ' "supported_on": ["chrome.linux:8-"],'
- ' "example_value": ["foo", "bar"]'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium\\ListPolicy]',
- '"1"="foo"',
- '"2"="bar"'])
-
- def testDictionaryPolicy(self):
- # Tests a policy group with a single policy of type 'dict'.
- example = {
- 'bool': True,
- 'dict': {
- 'a': 1,
- 'b': 2,
- },
- 'int': 10,
- 'list': [1, 2, 3],
- 'string': 'abc',
- }
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "DictionaryPolicy",'
- ' "type": "dict",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": ' + str(example) +
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
- '"DictionaryPolicy"="{"bool": true, "dict": {"a": 1, '
- '"b": 2}, "int": 10, "list": [1, 2, 3], "string": "abc"}"'])
- self.CompareOutputs(output, expected_output)
-
- def testNonSupportedPolicy(self):
- # Tests a policy that is not supported on Windows, so it shouldn't
- # be included in the .REG file.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "NonWindowsPolicy",'
- ' "type": "list",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.mac:8-"],'
- ' "example_value": ["a"]'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00'])
- self.CompareOutputs(output, expected_output)
-
- def testPolicyGroup(self):
- # Tests a policy group that has more than one policies.
- grd = self.PrepareTest(
- '{'
- ' "policy_definitions": ['
- ' {'
- ' "name": "Group1",'
- ' "type": "group",'
- ' "caption": "",'
- ' "desc": "",'
- ' "policies": [{'
- ' "name": "Policy1",'
- ' "type": "list",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": ["a", "b"]'
- ' },{'
- ' "name": "Policy2",'
- ' "type": "string",'
- ' "caption": "",'
- ' "desc": "",'
- ' "supported_on": ["chrome.win:8-"],'
- ' "example_value": "c"'
- ' }],'
- ' },'
- ' ],'
- ' "placeholders": [],'
- ' "messages": {},'
- '}')
- output = self.GetOutput(grd, 'fr', {'_chromium' : '1'}, 'reg', 'en')
- expected_output = self.NEWLINE.join([
- 'Windows Registry Editor Version 5.00',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium]',
- '"Policy2"="c"',
- '',
- '[HKEY_LOCAL_MACHINE\\Software\\Policies\\Chromium\\Policy1]',
- '"1"="a"',
- '"2"="b"'])
- self.CompareOutputs(output, expected_output)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/template_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/template_writer.py
deleted file mode 100755
index 4a6b77b481e..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/template_writer.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-class TemplateWriter(object):
- '''Abstract base class for writing policy templates in various formats.
- The methods of this class will be called by PolicyTemplateGenerator.
- '''
-
- def __init__(self, platforms, config):
- '''Initializes a TemplateWriter object.
-
- Args:
- platforms: List of platforms for which this writer can write policies.
- config: A dictionary of information required to generate the template.
- It contains some key-value pairs, including the following examples:
- 'build': 'chrome' or 'chromium'
- 'branding': 'Google Chrome' or 'Chromium'
- 'mac_bundle_id': The Mac bundle id of Chrome. (Only set when building
- for Mac.)
- messages: List of all the message strings from the grd file. Most of them
- are also present in the policy data structures that are passed to
- methods. That is the preferred way of accessing them, this should only
- be used in exceptional cases. An example for its use is the
- IDS_POLICY_WIN_SUPPORTED_WINXPSP2 message in ADM files, because that
- cannot be associated with any policy or group.
- '''
- self.platforms = platforms
- self.config = config
-
- def IsDeprecatedPolicySupported(self, policy):
- '''Checks if the given deprecated policy is supported by the writer.
-
- Args:
- policy: The dictionary of the policy.
-
- Returns:
- True if the writer chooses to include the deprecated 'policy' in its
- output.
- '''
- return False
-
- def IsFuturePolicySupported(self, policy):
- '''Checks if the given future policy is supported by the writer.
-
- Args:
- policy: The dictionary of the policy.
-
- Returns:
- True if the writer chooses to include the deprecated 'policy' in its
- output.
- '''
- return False
-
- def IsPolicySupported(self, policy):
- '''Checks if the given policy is supported by the writer.
- In other words, the set of platforms supported by the writer
- has a common subset with the set of platforms that support
- the policy.
-
- Args:
- policy: The dictionary of the policy.
-
- Returns:
- True if the writer chooses to include 'policy' in its output.
- '''
- if ('deprecated' in policy and policy['deprecated'] is True and
- not self.IsDeprecatedPolicySupported(policy)):
- return False
-
- if ('future' in policy and policy['future'] is True and
- not self.IsFuturePolicySupported(policy)):
- return False
-
- if '*' in self.platforms:
- # Currently chrome_os is only catched here.
- return True
- for supported_on in policy['supported_on']:
- for supported_on_platform in supported_on['platforms']:
- if supported_on_platform in self.platforms:
- return True
- return False
-
- def CanBeRecommended(self, policy):
- '''Checks if the given policy can be recommended.'''
- return policy.get('features', {}).get('can_be_recommended', False)
-
- def CanBeMandatory(self, policy):
- '''Checks if the given policy can be mandatory.'''
- return policy.get('features', {}).get('can_be_mandatory', True)
-
- def IsPolicySupportedOnPlatform(self, policy, platform, product=None):
- '''Checks if |policy| is supported on |product| for |platform|. If not
- specified, only the platform support is checked.
-
- Args:
- policy: The dictionary of the policy.
- platform: The platform to check; one of 'win', 'mac', 'linux' or
- 'chrome_os'.
- product: Optional product to check; one of 'chrome', 'chrome_frame',
- 'chrome_os', 'webview'
- '''
- is_supported = lambda x: (platform in x['platforms'] and
- (not product or product in x['product']))
-
- return any(filter(is_supported, policy['supported_on']))
-
- def _GetChromiumVersionString(self):
- '''Returns the Chromium version string stored in the environment variable
- version (if it is set).
-
- Returns: The Chromium version string or None if it has not been set.'''
-
- if 'version' in self.config:
- return self.config['version']
-
- def _GetPoliciesForWriter(self, group):
- '''Filters the list of policies in the passed group that are supported by
- the writer.
-
- Args:
- group: The dictionary of the policy group.
-
- Returns: The list of policies of the policy group that are compatible
- with the writer.
- '''
- if not 'policies' in group:
- return []
- result = []
- for policy in group['policies']:
- if self.IsPolicySupported(policy):
- result.append(policy)
- return result
-
- def Init(self):
- '''Initializes the writer. If the WriteTemplate method is overridden, then
- this method must be called as first step of each template generation
- process.
- '''
- pass
-
- def WriteTemplate(self, template):
- '''Writes the given template definition.
-
- Args:
- template: Template definition to write.
-
- Returns:
- Generated output for the passed template definition.
- '''
- self.messages = template['messages']
- self.Init()
- template['policy_definitions'] = \
- self.PreprocessPolicies(template['policy_definitions'])
- self.BeginTemplate()
- for policy in template['policy_definitions']:
- if policy['type'] == 'group':
- child_policies = self._GetPoliciesForWriter(policy)
- child_recommended_policies = filter(self.CanBeRecommended,
- child_policies)
- if child_policies:
- # Only write nonempty groups.
- self.BeginPolicyGroup(policy)
- for child_policy in child_policies:
- # Nesting of groups is currently not supported.
- self.WritePolicy(child_policy)
- self.EndPolicyGroup()
- if child_recommended_policies:
- self.BeginRecommendedPolicyGroup(policy)
- for child_policy in child_recommended_policies:
- self.WriteRecommendedPolicy(child_policy)
- self.EndRecommendedPolicyGroup()
- elif self.IsPolicySupported(policy):
- self.WritePolicy(policy)
- if self.CanBeRecommended(policy):
- self.WriteRecommendedPolicy(policy)
- self.EndTemplate()
-
- return self.GetTemplateText()
-
- def PreprocessPolicies(self, policy_list):
- '''Preprocesses a list of policies according to a given writer's needs.
- Preprocessing steps include sorting policies and stripping unneeded
- information such as groups (for writers that ignore them).
- Subclasses are encouraged to override this method, overriding
- implementations may call one of the provided specialized implementations.
- The default behaviour is to use SortPoliciesGroupsFirst().
-
- Args:
- policy_list: A list containing the policies to sort.
-
- Returns:
- The sorted policy list.
- '''
- return self.SortPoliciesGroupsFirst(policy_list)
-
- def WritePolicy(self, policy):
- '''Appends the template text corresponding to a policy into the
- internal buffer.
-
- Args:
- policy: The policy as it is found in the JSON file.
- '''
- raise NotImplementedError()
-
- def WriteComment(self, comment):
- '''Appends the comment to the internal buffer.
-
- comment: The comment to be added.
- '''
- raise NotImplementedError()
-
- def WriteRecommendedPolicy(self, policy):
- '''Appends the template text corresponding to a recommended policy into the
- internal buffer.
-
- Args:
- policy: The recommended policy as it is found in the JSON file.
- '''
- # TODO
- #raise NotImplementedError()
- pass
-
- def BeginPolicyGroup(self, group):
- '''Appends the template text corresponding to the beginning of a
- policy group into the internal buffer.
-
- Args:
- group: The policy group as it is found in the JSON file.
- '''
- pass
-
- def EndPolicyGroup(self):
- '''Appends the template text corresponding to the end of a
- policy group into the internal buffer.
- '''
- pass
-
- def BeginRecommendedPolicyGroup(self, group):
- '''Appends the template text corresponding to the beginning of a recommended
- policy group into the internal buffer.
-
- Args:
- group: The recommended policy group as it is found in the JSON file.
- '''
- pass
-
- def EndRecommendedPolicyGroup(self):
- '''Appends the template text corresponding to the end of a recommended
- policy group into the internal buffer.
- '''
- pass
-
- def BeginTemplate(self):
- '''Appends the text corresponding to the beginning of the whole
- template into the internal buffer.
- '''
- raise NotImplementedError()
-
- def EndTemplate(self):
- '''Appends the text corresponding to the end of the whole
- template into the internal buffer.
- '''
- pass
-
- def GetTemplateText(self):
- '''Gets the content of the internal template buffer.
-
- Returns:
- The generated template from the the internal buffer as a string.
- '''
- raise NotImplementedError()
-
- def SortPoliciesGroupsFirst(self, policy_list):
- '''Sorts a list of policies alphabetically. The order is the
- following: first groups alphabetically by caption, then other policies
- alphabetically by name. The order of policies inside groups is unchanged.
-
- Args:
- policy_list: The list of policies to sort. Sub-lists in groups will not
- be sorted.
- '''
- policy_list.sort(key=self.GetPolicySortingKeyGroupsFirst)
- return policy_list
-
- def FlattenGroupsAndSortPolicies(self, policy_list, sorting_key=None):
- '''Sorts a list of policies according to |sorting_key|, defaulting
- to alphabetical sorting if no key is given. If |policy_list| contains
- policies with type="group", it is flattened first, i.e. any groups' contents
- are inserted into the list as first-class elements and the groups are then
- removed.
- '''
- new_list = []
- for policy in policy_list:
- if policy['type'] == 'group':
- for grouped_policy in policy['policies']:
- new_list.append(grouped_policy)
- else:
- new_list.append(policy)
- if sorting_key == None:
- sorting_key = self.GetPolicySortingKeyName
- new_list.sort(key=sorting_key)
- return new_list
-
- def GetPolicySortingKeyName(self, policy):
- return policy['name']
-
- def GetPolicySortingKeyGroupsFirst(self, policy):
- '''Extracts a sorting key from a policy. These keys can be used for
- list.sort() methods to sort policies.
- See TemplateWriter.SortPolicies for usage.
- '''
- is_group = policy['type'] == 'group'
- if is_group:
- # Groups are sorted by caption.
- str_key = policy['caption']
- else:
- # Regular policies are sorted by name.
- str_key = policy['name']
- # Groups come before regular policies.
- return (not is_group, str_key)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/template_writer_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/template_writer_unittest.py
deleted file mode 100755
index 172e2921b52..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/template_writer_unittest.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Unit tests for grit.format.policy_templates.writers.template_writer'''
-
-import os
-import sys
-if __name__ == '__main__':
- sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
-
-import unittest
-
-from grit.format.policy_templates.writers import template_writer
-
-
-POLICY_DEFS = [
- {'name': 'zp', 'type': 'string', 'caption': 'a1', 'supported_on': []},
- {
- 'type': 'group',
- 'caption': 'z_group1_caption',
- 'name': 'group1',
- 'policies': [
- {'name': 'z0', 'type': 'string', 'supported_on': []},
- {'name': 'a0', 'type': 'string', 'supported_on': []}
- ]
- },
- {
- 'type': 'group',
- 'caption': 'b_group2_caption',
- 'name': 'group2',
- 'policies': [{'name': 'q', 'type': 'string', 'supported_on': []}],
- },
- {'name': 'ap', 'type': 'string', 'caption': 'a2', 'supported_on': []}
-]
-
-
-GROUP_FIRST_SORTED_POLICY_DEFS = [
- {
- 'type': 'group',
- 'caption': 'b_group2_caption',
- 'name': 'group2',
- 'policies': [{'name': 'q', 'type': 'string', 'supported_on': []}],
- },
- {
- 'type': 'group',
- 'caption': 'z_group1_caption',
- 'name': 'group1',
- 'policies': [
- {'name': 'z0', 'type': 'string', 'supported_on': []},
- {'name': 'a0', 'type': 'string', 'supported_on': []}
- ]
- },
- {'name': 'ap', 'type': 'string', 'caption': 'a2', 'supported_on': []},
- {'name': 'zp', 'type': 'string', 'caption': 'a1', 'supported_on': []},
-]
-
-
-IGNORE_GROUPS_SORTED_POLICY_DEFS = [
- {'name': 'a0', 'type': 'string', 'supported_on': []},
- {'name': 'ap', 'type': 'string', 'caption': 'a2', 'supported_on': []},
- {'name': 'q', 'type': 'string', 'supported_on': []},
- {'name': 'z0', 'type': 'string', 'supported_on': []},
- {'name': 'zp', 'type': 'string', 'caption': 'a1', 'supported_on': []},
-]
-
-
-class TemplateWriterUnittests(unittest.TestCase):
- '''Unit tests for templater_writer.py.'''
-
- def testSortingGroupsFirst(self):
- tw = template_writer.TemplateWriter(None, None)
- sorted_list = tw.SortPoliciesGroupsFirst(POLICY_DEFS)
- self.assertEqual(sorted_list, GROUP_FIRST_SORTED_POLICY_DEFS)
-
- def testSortingIgnoreGroups(self):
- tw = template_writer.TemplateWriter(None, None)
- sorted_list = tw.FlattenGroupsAndSortPolicies(POLICY_DEFS)
- self.assertEqual(sorted_list, IGNORE_GROUPS_SORTED_POLICY_DEFS)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/writer_unittest_common.py b/chromium/tools/grit/grit/format/policy_templates/writers/writer_unittest_common.py
deleted file mode 100755
index f75c391cebf..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/writer_unittest_common.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-'''Common tools for unit-testing writers.'''
-
-
-import os
-import tempfile
-import unittest
-import StringIO
-
-from grit import grd_reader
-from grit import util
-from grit.tool import build
-
-
-class DummyOutput(object):
- def __init__(self, type, language, file = 'hello.gif'):
- self.type = type
- self.language = language
- self.file = file
- def GetType(self):
- return self.type
- def GetLanguage(self):
- return self.language
- def GetOutputFilename(self):
- return self.file
-
-
-class WriterUnittestCommon(unittest.TestCase):
- '''Common class for unittesting writers.'''
-
- def PrepareTest(self, policy_json):
- '''Prepares and parses a grit tree along with a data structure of policies.
-
- Args:
- policy_json: The policy data structure in JSON format.
- '''
- # First create a temporary file that contains the JSON policy list.
- tmp_file_name = 'test.json'
- tmp_dir_name = tempfile.gettempdir()
- json_file_path = tmp_dir_name + '/' + tmp_file_name
- with open(json_file_path, 'w') as f:
- f.write(policy_json.strip())
- # Then assemble the grit tree.
- grd_text = '''
- <grit base_dir="." latest_public_release="0" current_release="1" source_lang_id="en">
- <release seq="1">
- <structures>
- <structure name="IDD_POLICY_SOURCE_FILE" file="%s" type="policy_template_metafile" />
- </structures>
- </release>
- </grit>''' % json_file_path
- grd_string_io = StringIO.StringIO(grd_text)
- # Parse the grit tree and load the policies' JSON with a gatherer.
- grd = grd_reader.Parse(grd_string_io, dir=tmp_dir_name)
- grd.SetOutputLanguage('en')
- grd.RunGatherers()
- # Remove the policies' JSON.
- os.unlink(json_file_path)
- return grd
-
- def GetOutput(self, grd, env_lang, env_defs, out_type, out_lang):
- '''Generates an output of a writer.
-
- Args:
- grd: The root of the grit tree.
- env_lang: The environment language.
- env_defs: Environment definitions.
- out_type: Type of the output node for which output will be generated.
- This selects the writer.
- out_lang: Language of the output node for which output will be generated.
-
- Returns:
- The string of the template created by the writer.
- '''
- grd.SetOutputLanguage(env_lang)
- grd.SetDefines(env_defs)
- buf = StringIO.StringIO()
- build.RcBuilder.ProcessNode(grd, DummyOutput(out_type, out_lang), buf)
- return buf.getvalue()
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/xml_formatted_writer.py b/chromium/tools/grit/grit/format/policy_templates/writers/xml_formatted_writer.py
deleted file mode 100755
index 37f51d2f74c..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/xml_formatted_writer.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-from grit.format.policy_templates.writers import template_writer
-
-
-class XMLFormattedWriter(template_writer.TemplateWriter):
- '''Helper class for generating XML-based templates.
- '''
-
- def AddElement(self, parent, name, attrs=None, text=None):
- '''
- Adds a new XML Element as a child to an existing element or the Document.
-
- Args:
- parent: An XML element or the document, where the new element will be
- added.
- name: The name of the new element.
- attrs: A dictionary of the attributes' names and values for the new
- element.
- text: Text content for the new element.
-
- Returns:
- The created new element.
- '''
- if attrs == None:
- attrs = {}
-
- doc = parent.ownerDocument
- element = doc.createElement(name)
- for key, value in sorted(attrs.iteritems()):
- element.setAttribute(key, value)
- if text:
- element.appendChild(doc.createTextNode(text))
- parent.appendChild(element)
- return element
-
- def AddText(self, parent, text):
- '''Adds text to a parent node.
- '''
- doc = parent.ownerDocument
- parent.appendChild(doc.createTextNode(text))
-
- def AddAttribute(self, parent, name, value):
- '''Adds a new attribute to the parent Element. If an attribute with the
- given name already exists then it will be replaced.
- '''
- doc = parent.ownerDocument
- attribute = doc.createAttribute(name)
- attribute.value = value
- parent.setAttributeNode(attribute)
-
- def AddComment(self, parent, comment):
- '''Adds a comment node.'''
- parent.appendChild(parent.ownerDocument.createComment(comment))
-
- def ToPrettyXml(self, doc, **kwargs):
- # return doc.toprettyxml(indent=' ')
- # The above pretty-printer does not print the doctype and adds spaces
- # around texts, e.g.:
- # <string>
- # value of the string
- # </string>
- # This is problematic both for the OSX Workgroup Manager (plist files) and
- # the Windows Group Policy Editor (admx files). What they need instead:
- # <string>value of string</string>
- # So we use a hacky pretty printer here. It assumes that there are no
- # mixed-content nodes.
- # Get all the XML content in a one-line string.
- xml = doc.toxml(**kwargs)
- # Determine where the line breaks will be. (They will only be between tags.)
- lines = xml[1:len(xml) - 1].split('><')
- indent = ''
- res = ''
- # Determine indent for each line.
- for i, line in enumerate(lines):
- if line[0] == '/':
- # If the current line starts with a closing tag, decrease indent before
- # printing.
- indent = indent[2:]
- lines[i] = indent + '<' + line + '>'
- if (line[0] not in ['/', '?', '!'] and '</' not in line and
- line[len(line) - 1] != '/'):
- # If the current line starts with an opening tag and does not conatin a
- # closing tag, increase indent after the line is printed.
- indent += ' '
- # Reconstruct XML text from the lines.
- return '\n'.join(lines)
diff --git a/chromium/tools/grit/grit/format/policy_templates/writers/xml_writer_base_unittest.py b/chromium/tools/grit/grit/format/policy_templates/writers/xml_writer_base_unittest.py
deleted file mode 100755
index 8d7a7f24104..00000000000
--- a/chromium/tools/grit/grit/format/policy_templates/writers/xml_writer_base_unittest.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-
-"""Unittests for grit.format.policy_templates.writers.admx_writer."""
-
-
-import re
-import unittest
-
-
-class XmlWriterBaseTest(unittest.TestCase):
- '''Base class for XML writer unit-tests.
- '''
-
- def GetXMLOfChildren(self, parent):
- '''Returns the XML of all child nodes of the given parent node.
- Args:
- parent: The XML of the children of this node will be returned.
-
- Return: XML of the chrildren of the parent node.
- '''
- raw_pretty_xml = ''.join(
- child.toprettyxml(indent=' ') for child in parent.childNodes)
- # Python 2.6.5 which is present in Lucid has bug in its pretty print
- # function which produces new lines around string literals. This has been
- # fixed in Precise which has Python 2.7.3 but we have to keep compatibility
- # with both for now.
- text_re = re.compile('>\n\s+([^<>\s].*?)\n\s*</', re.DOTALL)
- return text_re.sub('>\g<1></', raw_pretty_xml)
-
- def AssertXMLEquals(self, output, expected_output):
- '''Asserts if the passed XML arguements are equal.
- Args:
- output: Actual XML text.
- expected_output: Expected XML text.
- '''
- self.assertEquals(output.strip(), expected_output.strip())
diff --git a/chromium/tools/grit/grit/format/policy_templates_json.py b/chromium/tools/grit/grit/format/policy_templates_json.py
new file mode 100755
index 00000000000..13f680e4bdf
--- /dev/null
+++ b/chromium/tools/grit/grit/format/policy_templates_json.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Translates policy_templates.json files.
+"""
+
+from grit.node import structure
+
+
+def Format(root, lang='en', output_dir='.'):
+ policy_json = None
+ for item in root.ActiveDescendants():
+ with item:
+ if (isinstance(item, structure.StructureNode) and
+ item.attrs['type'] == 'policy_template_metafile'):
+ json_text = item.gatherer.Translate(
+ lang,
+ pseudo_if_not_available=item.PseudoIsAllowed(),
+ fallback_to_english=item.ShouldFallbackToEnglish())
+ # We're only expecting one node of this kind.
+ assert not policy_json
+ policy_json = json_text
+ return policy_json
diff --git a/chromium/tools/grit/grit/format/policy_templates_json_unittest.py b/chromium/tools/grit/grit/format/policy_templates_json_unittest.py
new file mode 100755
index 00000000000..0c9b157f7f1
--- /dev/null
+++ b/chromium/tools/grit/grit/format/policy_templates_json_unittest.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+# coding: utf-8
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unittest for policy_templates_json.py.
+"""
+
+import os
+import sys
+if __name__ == '__main__':
+ sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
+
+import grit.extern.tclib
+import tempfile
+import unittest
+import StringIO
+
+from grit import grd_reader
+from grit import util
+from grit.tool import build
+
+
+class PolicyTemplatesJsonUnittest(unittest.TestCase):
+
+ def testPolicyTranslation(self):
+ # Create test policy_templates.json data.
+ caption = "The main policy"
+ caption_translation = "Die Hauptrichtilinie"
+
+ message = \
+ "Red cabbage stays red cabbage and wedding dress stays wedding dress"
+ message_translation = \
+ "Blaukraut bleibt Blaukraut und Brautkleid bleibt Brautkleid"
+
+ policy_json = """
+ {
+ "policy_definitions": [
+ {
+ 'name': 'MainPolicy',
+ 'type': 'main',
+ 'schema': { 'type': 'boolean' },
+ 'supported_on': ['chrome_os:29-'],
+ 'features': {
+ 'can_be_recommended': True,
+ 'dynamic_refresh': True,
+ },
+ 'example_value': True,
+ 'caption': '''%s''',
+ 'tags': [],
+ 'desc': '''This policy does stuff.'''
+ },
+ ],
+ "placeholders": [],
+ "messages": {
+ 'message_string_id': {
+ 'desc': '''The description is removed from the grit output''',
+ 'text': '''%s'''
+ }
+ }
+ }""" % (caption, message)
+
+ # Create translations. The translation IDs are hashed from the English text.
+ caption_id = grit.extern.tclib.GenerateMessageId(caption);
+ message_id = grit.extern.tclib.GenerateMessageId(message);
+ policy_xtb = """
+<?xml version="1.0" ?>
+<!DOCTYPE translationbundle>
+<translationbundle lang="de">
+<translation id="%s">%s</translation>
+<translation id="%s">%s</translation>
+</translationbundle>""" % (caption_id, caption_translation,
+ message_id, message_translation)
+
+ # Write both to a temp file.
+ tmp_dir_name = tempfile.gettempdir()
+
+ json_file_path = os.path.join(tmp_dir_name, 'test.json')
+ with open(json_file_path, 'w') as f:
+ f.write(policy_json.strip())
+
+ xtb_file_path = os.path.join(tmp_dir_name, 'test.xtb')
+ with open(xtb_file_path, 'w') as f:
+ f.write(policy_xtb.strip())
+
+ # Assemble a test grit tree, similar to policy_templates.grd.
+ grd_text = '''
+ <grit base_dir="." latest_public_release="0" current_release="1" source_lang_id="en">
+ <translations>
+ <file path="%s" lang="de" />
+ </translations>
+ <release seq="1">
+ <structures>
+ <structure name="IDD_POLICY_SOURCE_FILE" file="%s" type="policy_template_metafile" />
+ </structures>
+ </release>
+ </grit>''' % (xtb_file_path, json_file_path)
+ grd_string_io = StringIO.StringIO(grd_text)
+
+ # Parse the grit tree and load the policies' JSON with a gatherer.
+ grd = grd_reader.Parse(grd_string_io, dir=tmp_dir_name)
+ grd.SetOutputLanguage('en')
+ grd.RunGatherers()
+
+ # Remove the temp files.
+ os.unlink(xtb_file_path)
+ os.unlink(json_file_path)
+
+ # Run grit with en->de translation.
+ env_lang = 'en'
+ out_lang = 'de'
+ env_defs = {'_google_chrome': '1'}
+
+ grd.SetOutputLanguage(env_lang)
+ grd.SetDefines(env_defs)
+ buf = StringIO.StringIO()
+ build.RcBuilder.ProcessNode(grd, DummyOutput('policy_templates', out_lang), buf)
+ output = buf.getvalue()
+
+ # Caption and message texts get taken from xtb.
+ # desc is 'translated' to some pseudo-English
+ # 'ThïPïs pôPôlïPïcýPý dôéPôés stüPüff'.
+ expected = u"""{
+ 'policy_definitions': [
+ {
+ 'caption': '''%s''',
+ 'features': {'can_be_recommended': True, 'dynamic_refresh': True},
+ 'name': 'MainPolicy',
+ 'tags': [],
+ 'desc': '''Th\xefP\xefs p\xf4P\xf4l\xefP\xefc\xfdP\xfd d\xf4\xe9P\xf4\xe9s st\xfcP\xfcff.''',
+ 'type': 'main',
+ 'example_value': True,
+ 'supported_on': ['chrome_os:29-'],
+ 'schema': {'type': 'boolean'},
+ },
+ ],
+ 'messages': {
+ 'message_string_id': {
+ 'text': '''%s'''
+ },
+ },
+
+}""" % (caption_translation, message_translation)
+ self.assertEqual(expected, output)
+
+
+class DummyOutput(object):
+
+ def __init__(self, type, language):
+ self.type = type
+ self.language = language
+
+ def GetType(self):
+ return self.type
+
+ def GetLanguage(self):
+ return self.language
+
+ def GetOutputFilename(self):
+ return 'hello.gif'
diff --git a/chromium/tools/grit/grit/grd_reader.py b/chromium/tools/grit/grit/grd_reader.py
index c337b6f2c8e..fb33e9c7831 100755
--- a/chromium/tools/grit/grit/grd_reader.py
+++ b/chromium/tools/grit/grit/grd_reader.py
@@ -26,7 +26,7 @@ class StopParsingException(Exception):
class GrdContentHandler(xml.sax.handler.ContentHandler):
def __init__(self, stop_after, debug, dir, defines, tags_to_ignore,
- target_platform):
+ target_platform, source):
# Invariant of data:
# 'root' is the root of the parse tree being created, or None if we haven't
# parsed out any elements.
@@ -42,6 +42,7 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
self.tags_to_ignore = tags_to_ignore or set()
self.ignore_depth = 0
self.target_platform = target_platform
+ self.source = source
def startElement(self, name, attrs):
if self.ignore_depth or name in self.tags_to_ignore:
@@ -57,6 +58,7 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
typeattr = attrs.get('type')
node = mapping.ElementToClass(name, typeattr)()
+ node.source = self.source
if self.stack:
self.stack[-1].AddChild(node)
@@ -89,7 +91,12 @@ class GrdContentHandler(xml.sax.handler.ContentHandler):
if not os.path.exists(partname):
raise exception.FileNotFound()
# Exceptions propagate to the handler in grd_reader.Parse().
- xml.sax.parse(partname, GrdPartContentHandler(self))
+ oldsource = self.source
+ try:
+ self.source = partname
+ xml.sax.parse(partname, GrdPartContentHandler(self))
+ finally:
+ self.source = oldsource
if self.debug:
print "End parsing of element %s" % name
@@ -178,13 +185,17 @@ def Parse(filename_or_stream, dir=None, stop_after=None, first_ids_file=None,
grit.exception.Parsing
'''
- if dir is None and isinstance(filename_or_stream, types.StringType):
- dir = util.dirname(filename_or_stream)
+ if isinstance(filename_or_stream, types.StringType):
+ source = filename_or_stream
+ if dir is None:
+ dir = util.dirname(filename_or_stream)
+ else:
+ source = None
rc_header.SetPredeterminedIdsFile(predetermined_ids_file)
handler = GrdContentHandler(stop_after=stop_after, debug=debug, dir=dir,
defines=defines, tags_to_ignore=tags_to_ignore,
- target_platform=target_platform)
+ target_platform=target_platform, source=source)
try:
xml.sax.parse(filename_or_stream, handler)
except StopParsingException:
diff --git a/chromium/tools/grit/grit/grd_reader_unittest.py b/chromium/tools/grit/grit/grd_reader_unittest.py
index c782d02df45..a6a80a0ddec 100755
--- a/chromium/tools/grit/grit/grd_reader_unittest.py
+++ b/chromium/tools/grit/grit/grd_reader_unittest.py
@@ -18,6 +18,7 @@ from grit import grd_reader
from grit import util
from grit.node import base
from grit.node import empty
+from grit.node import message
class GrdReaderUnittest(unittest.TestCase):
@@ -210,7 +211,7 @@ class GrdReaderUnittest(unittest.TestCase):
hello = root.GetNodeById('IDS_HELLO')
self.failUnless(hello.GetCliques()[0].GetId() == 'IDS_HELLO')
- def testPartInclusion(self):
+ def testPartInclusionAndCorrectSource(self):
arbitrary_path_grd = u'''\
<grit-part>
<message name="IDS_TEST5" desc="test5">test5</message>
@@ -268,9 +269,21 @@ class GrdReaderUnittest(unittest.TestCase):
</messages>
</release>
</grit>''' % arbitrary_path_grd_file
+
with util.TempDir({'sub.grp': sub_grd,
'subsub.grp': subsub_grd}) as temp_dir:
output = grd_reader.Parse(StringIO.StringIO(top_grd), temp_dir.GetPath())
+ correct_sources = {
+ 'IDS_TEST': None,
+ 'IDS_TEST2': temp_dir.GetPath('sub.grp'),
+ 'IDS_TEST3': temp_dir.GetPath('sub.grp'),
+ 'IDS_TEST4': temp_dir.GetPath('subsub.grp'),
+ 'IDS_TEST5': arbitrary_path_grd_file,
+ }
+ for node in output.ActiveDescendants():
+ with node:
+ if isinstance(node, message.MessageNode):
+ self.assertEqual(correct_sources[node.attrs.get('name')], node.source)
self.assertEqual(expected_output.split(), output.FormatXml().split())
def testPartInclusionFailure(self):
diff --git a/chromium/tools/grit/grit/node/base.py b/chromium/tools/grit/grit/node/base.py
index a40794b159d..abb98c03135 100755
--- a/chromium/tools/grit/grit/node/base.py
+++ b/chromium/tools/grit/grit/node/base.py
@@ -8,12 +8,14 @@
import ast
import os
+import sys
import types
from xml.sax import saxutils
from grit import clique
from grit import exception
from grit import util
+import grit.format.gzip_string
class Node(object):
@@ -43,6 +45,7 @@ class Node(object):
self.attrs = {} # The set of attributes (keys to values)
self.parent = None # Our parent unless we are the root element.
self.uberclique = None # Allows overriding uberclique for parts of tree
+ self.source = None # File that this node was parsed from
# This context handler allows you to write "with node:" and get a
# line identifying the offending node if an exception escapes from the body
@@ -605,6 +608,26 @@ class Node(object):
from the root node.'''
return False
+ def CompressDataIfNeeded(self, data):
+ '''Compress data using the format specified in the compress attribute.
+
+ Args:
+ data: The data to compressed.
+ Returns:
+ The data in compressed format. If the format was unknown or not supported
+ on the target platform then returns the data uncompressed.
+ '''
+ if (self.attrs.get('compress') != 'gzip'
+ or self.GetRoot().target_platform == 'ios'):
+ return data
+
+ # We only use rsyncable compression on Linux.
+ # We exclude ChromeOS since ChromeOS bots are Linux based but do not have
+ # the --rsyncable option built in for gzip. See crbug.com/617950.
+ if sys.platform == 'linux2' and 'chromeos' not in self.GetRoot().defines:
+ return grit.format.gzip_string.GzipStringRsyncable(data)
+ return grit.format.gzip_string.GzipString(data)
+
class ContentNode(Node):
'''Convenience baseclass for nodes that can have content.'''
diff --git a/chromium/tools/grit/grit/node/include.py b/chromium/tools/grit/grit/node/include.py
index 46f1c07be15..5787e6e1fb2 100755
--- a/chromium/tools/grit/grit/node/include.py
+++ b/chromium/tools/grit/grit/node/include.py
@@ -7,11 +7,9 @@
"""
import os
-import sys
from grit import exception
from grit import util
-import grit.format.gzip_string
import grit.format.html_inline
import grit.format.rc
import grit.format.rc_header
@@ -97,19 +95,10 @@ class IncludeNode(base.Node):
# Note that the minifier will only do anything if a minifier command
# has been set in the command line.
data = minifier.Minify(data, filename)
- use_gzip = self.attrs.get('compress', '') == 'gzip'
- if use_gzip and self.GetRoot().target_platform != 'ios':
- # We only use rsyncable compression on Linux.
- # We exclude ChromeOS since ChromeOS bots are Linux based but do not have
- # the --rsyncable option built in for gzip. See crbug.com/617950.
- if sys.platform == 'linux2' and 'chromeos' not in self.GetRoot().defines:
- data = grit.format.gzip_string.GzipStringRsyncable(data)
- else:
- data = grit.format.gzip_string.GzipString(data)
# Include does not care about the encoding, because it only returns binary
# data.
- return id, data
+ return id, self.CompressDataIfNeeded(data)
def Process(self, output_dir):
"""Rewrite file references to be base64 encoded data URLs. The new file
diff --git a/chromium/tools/grit/grit/node/structure.py b/chromium/tools/grit/grit/node/structure.py
index 07fe6492f11..4f0226f303d 100755
--- a/chromium/tools/grit/grit/node/structure.py
+++ b/chromium/tools/grit/grit/node/structure.py
@@ -150,6 +150,7 @@ class StructureNode(base.Node):
# dependencies.
'sconsdep' : 'false',
'variables': '',
+ 'compress': 'false',
}
def IsExcludedFromRc(self):
@@ -205,8 +206,10 @@ class StructureNode(base.Node):
id = id_map[self.GetTextualIds()[0]]
if self.ExpandVariables():
text = self.gatherer.GetText()
- return id, util.Encode(self._Substitute(text), encoding)
- return id, self.gatherer.GetData(lang, encoding)
+ data = util.Encode(self._Substitute(text), encoding)
+ else:
+ data = self.gatherer.GetData(lang, encoding)
+ return id, self.CompressDataIfNeeded(data)
def GetHtmlResourceFilenames(self):
"""Returns a set of all filenames inlined by this node."""
diff --git a/chromium/tools/grit/grit/node/structure_unittest.py b/chromium/tools/grit/grit/node/structure_unittest.py
index a039bce984b..03d63baab7f 100755
--- a/chromium/tools/grit/grit/node/structure_unittest.py
+++ b/chromium/tools/grit/grit/node/structure_unittest.py
@@ -9,6 +9,7 @@
import os
import os.path
import sys
+import zlib
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
@@ -64,6 +65,35 @@ class StructureUnittest(unittest.TestCase):
' Hello!\n'
'</p>\n'), result)
+ def testCompressGzip(self):
+ test_data_root = util.PathFromRoot('grit/testdata')
+ root = util.ParseGrdForUnittest('''
+ <structures>
+ <structure name="TEST_TXT" file="test_text.txt"
+ compress="gzip" type="chrome_html" />
+ </structures>''', base_dir=test_data_root)
+ struct, = root.GetChildrenOfType(structure.StructureNode)
+ struct.RunPreSubstitutionGatherer()
+ _, compressed = struct.GetDataPackPair(lang='en', encoding=1)
+
+ decompressed_data = zlib.decompress(compressed, 16 + zlib.MAX_WBITS)
+ self.assertEqual(util.ReadFile(
+ os.path.join(test_data_root, "test_text.txt"), util.BINARY),
+ decompressed_data)
+
+ def testNotCompressed(self):
+ test_data_root = util.PathFromRoot('grit/testdata')
+ root = util.ParseGrdForUnittest('''
+ <structures>
+ <structure name="TEST_TXT" file="test_text.txt" type="chrome_html" />
+ </structures>''', base_dir=test_data_root)
+ struct, = root.GetChildrenOfType(structure.StructureNode)
+ struct.RunPreSubstitutionGatherer()
+ _, data = struct.GetDataPackPair(lang='en', encoding=1)
+
+ self.assertEqual(util.ReadFile(
+ os.path.join(test_data_root, "test_text.txt"), util.BINARY), data)
+
if __name__ == '__main__':
unittest.main()
diff --git a/chromium/tools/grit/grit/test_suite_all.py b/chromium/tools/grit/grit/test_suite_all.py
index 32c91a07e35..4befd3313d4 100755
--- a/chromium/tools/grit/grit/test_suite_all.py
+++ b/chromium/tools/grit/grit/test_suite_all.py
@@ -38,21 +38,10 @@ class TestSuiteAll(unittest.TestSuite):
import grit.format.gzip_string_unittest
import grit.format.html_inline_unittest
import grit.format.js_map_format_unittest
+ import grit.format.policy_templates_json_unittest
import grit.format.rc_header_unittest
import grit.format.rc_unittest
import grit.format.resource_map_unittest
- import grit.format.policy_templates.policy_template_generator_unittest
- import grit.format.policy_templates.writers.adm_writer_unittest
- import grit.format.policy_templates.writers.adml_writer_unittest
- import grit.format.policy_templates.writers.admx_writer_unittest
- import grit.format.policy_templates.writers.android_policy_writer_unittest
- import grit.format.policy_templates.writers.doc_writer_unittest
- import grit.format.policy_templates.writers.json_writer_unittest
- import grit.format.policy_templates.writers.plist_strings_writer_unittest
- import grit.format.policy_templates.writers.plist_writer_unittest
- import grit.format.policy_templates.writers.reg_writer_unittest
- import grit.format.policy_templates.writers.template_writer_unittest
- import grit.format.policy_templates.writers.xml_writer_base_unittest
import grit.gather.admin_template_unittest
import grit.gather.chrome_html_unittest
import grit.gather.chrome_scaled_image_unittest
@@ -92,36 +81,13 @@ class TestSuiteAll(unittest.TestSuite):
grit.format.chrome_messages_json_unittest.
ChromeMessagesJsonFormatUnittest,
grit.format.data_pack_unittest.FormatDataPackUnittest,
+ grit.format.gzip_string_unittest.FormatGzipStringUnittest,
grit.format.html_inline_unittest.HtmlInlineUnittest,
grit.format.js_map_format_unittest.JsMapFormatUnittest,
+ grit.format.policy_templates_json_unittest.PolicyTemplatesJsonUnittest,
grit.format.rc_header_unittest.RcHeaderFormatterUnittest,
- grit.format.gzip_string_unittest.FormatGzipStringUnittest,
grit.format.rc_unittest.FormatRcUnittest,
grit.format.resource_map_unittest.FormatResourceMapUnittest,
- grit.format.policy_templates.policy_template_generator_unittest.
- PolicyTemplateGeneratorUnittest,
- grit.format.policy_templates.writers.adm_writer_unittest.
- AdmWriterUnittest,
- grit.format.policy_templates.writers.adml_writer_unittest.
- AdmlWriterUnittest,
- grit.format.policy_templates.writers.admx_writer_unittest.
- AdmxWriterUnittest,
- grit.format.policy_templates.writers.android_policy_writer_unittest.
- AndroidPolicyWriterUnittest,
- grit.format.policy_templates.writers.doc_writer_unittest.
- DocWriterUnittest,
- grit.format.policy_templates.writers.json_writer_unittest.
- JsonWriterUnittest,
- grit.format.policy_templates.writers.plist_strings_writer_unittest.
- PListStringsWriterUnittest,
- grit.format.policy_templates.writers.plist_writer_unittest.
- PListWriterUnittest,
- grit.format.policy_templates.writers.reg_writer_unittest.
- RegWriterUnittest,
- grit.format.policy_templates.writers.template_writer_unittest.
- TemplateWriterUnittests,
- grit.format.policy_templates.writers.xml_writer_base_unittest.
- XmlWriterBaseTest,
grit.gather.admin_template_unittest.AdmGathererUnittest,
grit.gather.chrome_html_unittest.ChromeHtmlUnittest,
grit.gather.chrome_scaled_image_unittest.ChromeScaledImageUnittest,
diff --git a/chromium/tools/grit/grit/tool/build.py b/chromium/tools/grit/grit/tool/build.py
index d2c9ff16b57..f409c95842e 100755
--- a/chromium/tools/grit/grit/tool/build.py
+++ b/chromium/tools/grit/grit/tool/build.py
@@ -31,6 +31,7 @@ _format_modules = {
'android': 'android_xml',
'c_format': 'c_format',
'chrome_messages_json': 'chrome_messages_json',
+ 'policy_templates': 'policy_templates_json',
'data_package': 'data_pack',
'js_map_format': 'js_map_format',
'rc_all': 'rc',
@@ -41,11 +42,6 @@ _format_modules = {
'resource_map_source': 'resource_map',
'resource_file_map_source': 'resource_map',
}
-_format_modules.update(
- (type, 'policy_templates.template_formatter') for type in
- [ 'adm', 'admx', 'adml', 'reg', 'doc', 'json',
- 'plist', 'plist_strings', 'android_policy' ])
-
def GetFormatter(type):
modulename = 'grit.format.' + _format_modules[type]
@@ -336,6 +332,13 @@ are exported to translation interchange files (e.g. XMB files), etc.
formatter = GetFormatter(output_node.GetType())
formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)
outfile.writelines(formatted)
+ if output_node.GetType() == 'data_package':
+ with open(output_node.GetOutputFilename() + '.info', 'w') as infofile:
+ if node.info:
+ # We terminate with a newline so that when these files are
+ # concatenated later we consistently terminate with a newline so
+ # consumers can account for terminating newlines.
+ infofile.writelines(['\n'.join(node.info), '\n'])
def Process(self):
diff --git a/chromium/tools/grit/grit_rule.gni b/chromium/tools/grit/grit_rule.gni
index 7b840f800c4..82bf44e3023 100644
--- a/chromium/tools/grit/grit_rule.gni
+++ b/chromium/tools/grit/grit_rule.gni
@@ -163,13 +163,6 @@ if (use_aura) {
]
}
-if (use_ash) {
- grit_defines += [
- "-D",
- "use_ash",
- ]
-}
-
if (use_nss_certs) {
grit_defines += [
"-D",
@@ -331,6 +324,16 @@ template("grit") {
grit_outputs =
get_path_info(rebase_path(invoker.outputs, ".", output_dir), "abspath")
+ # Add .info output for all pak files
+ set_sources_assignment_filter([ "*.pak" ])
+ sources = grit_outputs
+ pak_grit_outputs = grit_outputs - sources
+ sources = []
+ pak_info_outputs = []
+ foreach(output, pak_grit_outputs) {
+ pak_info_outputs += [ "${output}.info" ]
+ }
+
# The config and the action below get this visibility son only the generated
# source set can depend on them. The variable "target_name" will get
# overwritten inside the inner classes so we need to compute it here.
@@ -362,7 +365,7 @@ template("grit") {
}
depfile = "$depfile_dir/${grit_output_name}_stamp.d"
- outputs = [ "${depfile}.stamp" ] + grit_outputs
+ outputs = [ "${depfile}.stamp" ] + grit_outputs + pak_info_outputs
args = [
"-i",
diff --git a/chromium/tools/grit/repack.gni b/chromium/tools/grit/repack.gni
index b489c774e0c..55f81d7223d 100644
--- a/chromium/tools/grit/repack.gni
+++ b/chromium/tools/grit/repack.gni
@@ -51,6 +51,7 @@ template("repack") {
inputs = invoker.sources
outputs = [
invoker.output,
+ "${invoker.output}.info",
]
args = [ "repack" ]
diff --git a/chromium/tools/gritsettings/resource_ids b/chromium/tools/gritsettings/resource_ids
index ee0116d1852..b0c2f87c10e 100644
--- a/chromium/tools/gritsettings/resource_ids
+++ b/chromium/tools/gritsettings/resource_ids
@@ -81,17 +81,23 @@
# START chrome/browser section.
"chrome/browser/browser_resources.grd": {
"includes": [11000],
- "structures": [11520],
+ "structures": [11470],
},
"chrome/browser/resources/component_extension_resources.grd": {
- "includes": [11610],
- "structures": [11860],
+ "includes": [11560],
+ "structures": [11810],
},
"chrome/browser/resources/invalidations_resources.grd": {
- "includes": [11910],
+ "includes": [11860],
+ },
+ "chrome/browser/resources/md_extensions/extensions_resources_vulcanized.grd": {
+ "includes": [11900],
+ },
+ "chrome/browser/resources/md_extensions/extensions_resources.grd": {
+ "structures": [11910],
},
"chrome/browser/resources/net_internals_resources.grd": {
- "includes": [11960],
+ "includes": [12000],
},
"chrome/browser/resources/password_manager_internals_resources.grd": {
"includes": [12040],
diff --git a/chromium/tools/idl_parser/idl_lexer.py b/chromium/tools/idl_parser/idl_lexer.py
index c983404730b..60b82881d7b 100755
--- a/chromium/tools/idl_parser/idl_lexer.py
+++ b/chromium/tools/idl_parser/idl_lexer.py
@@ -79,6 +79,7 @@ class IDLLexer(object):
'legacycaller' : 'LEGACYCALLER',
'long' : 'LONG',
'maplike': 'MAPLIKE',
+ 'namespace' : 'NAMESPACE',
'Nan' : 'NAN',
'null' : 'NULL',
'object' : 'OBJECT',
diff --git a/chromium/tools/idl_parser/idl_node.py b/chromium/tools/idl_parser/idl_node.py
index afc309e1216..266a526fc84 100755
--- a/chromium/tools/idl_parser/idl_node.py
+++ b/chromium/tools/idl_parser/idl_node.py
@@ -76,6 +76,11 @@ class IDLAttribute(object):
# version aware.
#
class IDLNode(object):
+ VERBOSE_PROPS = [
+ 'PROD', 'NAME', 'VALUE', 'TYPE',
+ 'ERRORS', 'WARNINGS', 'FILENAME', 'LINENO', 'POSITION', 'DATETIME',
+ ]
+
def __init__(self, cls, filename, lineno, pos, children=None):
self._cls = cls
self._properties = {
@@ -140,29 +145,28 @@ class IDLNode(object):
search.Exit(self)
- def Tree(self, filter_nodes=None, accept_props=None):
+ def Tree(self, filter_nodes=None, suppress_props=VERBOSE_PROPS):
class DumpTreeSearch(IDLSearch):
def __init__(self, props):
IDLSearch.__init__(self)
self.out = []
- self.props = props
+ self.props = props or []
def Enter(self, node):
tab = ''.rjust(self.depth * 2)
self.out.append(tab + str(node))
- if self.props:
- proplist = []
- for key, value in node.GetProperties().iteritems():
- if key in self.props:
- proplist.append(tab + ' %s: %s' % (key, str(value)))
- if proplist:
- self.out.append(tab + ' PROPERTIES')
- self.out.extend(proplist)
+
+ proplist = []
+ for key, value in node.GetProperties().iteritems():
+ if key not in self.props:
+ proplist.append(tab + ' %s: %s' % (key, str(value)))
+ if proplist:
+ self.out.extend(proplist)
if filter_nodes == None:
filter_nodes = ['SpecialComment']
- search = DumpTreeSearch(accept_props)
+ search = DumpTreeSearch(suppress_props)
self.Traverse(search, filter_nodes)
return search.out
diff --git a/chromium/tools/idl_parser/idl_parser.py b/chromium/tools/idl_parser/idl_parser.py
index fff00a6792c..d498a66b836 100755
--- a/chromium/tools/idl_parser/idl_parser.py
+++ b/chromium/tools/idl_parser/idl_parser.py
@@ -59,6 +59,9 @@ ERROR_REMAP = {
'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
+_EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES = [
+ 'Clamp', 'EnforceRange', 'TreatNullAs']
+
def Boolean(val):
"""Convert to strict boolean type."""
@@ -134,6 +137,24 @@ def ExtractSpecialComment(comment):
lines.append(line)
return '\n'.join(lines)
+# There are two groups of ExtendedAttributes.
+# One group can apply to types (It is said "applicable to types"),
+# but the other cannot apply to types.
+# This function is intended to divide ExtendedAttributes into those 2 groups.
+# For more details at
+# https://heycam.github.io/webidl/#extended-attributes-applicable-to-types
+def DivideExtAttrsIntoApplicableAndNonApplicable(extended_attribute_list):
+ if not extended_attribute_list:
+ return [[], []]
+ else:
+ applicable_to_types = []
+ non_applicable_to_types = []
+ for ext_attribute in extended_attribute_list.GetChildren():
+ if ext_attribute.GetName() in _EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES:
+ applicable_to_types.append(ext_attribute)
+ else:
+ non_applicable_to_types.append(ext_attribute)
+ return [applicable_to_types, non_applicable_to_types]
#
# IDL Parser
@@ -220,14 +241,20 @@ def ExtractSpecialComment(comment):
# the Web IDL spec, such as allowing string list in extended attributes.
class IDLParser(object):
def p_Definitions(self, p):
- """Definitions : ExtendedAttributeList Definition Definitions
+ """Definitions : SpecialComments ExtendedAttributeList Definition Definitions
+ | ExtendedAttributeList Definition Definitions
| """
- if len(p) > 1:
+ if len(p) > 4:
+ special_comments_and_attribs = ListFromConcat(p[1], p[2])
+ p[3].AddChildren(special_comments_and_attribs)
+ p[0] = ListFromConcat(p[3], p[4])
+ elif len(p) > 1:
p[2].AddChildren(p[1])
p[0] = ListFromConcat(p[2], p[3])
def p_Definition(self, p):
"""Definition : CallbackOrInterface
+ | Namespace
| Partial
| Dictionary
| Enum
@@ -244,7 +271,8 @@ class IDLParser(object):
"""CallbackOrInterface : CALLBACK CallbackRestOrInterface
| Interface"""
if len(p) > 2:
- p[2].AddChildren(self.BuildTrue('CALLBACK'))
+ if p[2].GetClass() != 'Callback':
+ p[2].AddChildren(self.BuildTrue('CALLBACK'))
p[0] = p[2]
else:
p[0] = p[1]
@@ -265,7 +293,7 @@ class IDLParser(object):
def p_Partial(self, p):
"""Partial : PARTIAL PartialDefinition"""
- p[2].AddChildren(self.BuildTrue('Partial'))
+ p[2].AddChildren(self.BuildTrue('PARTIAL'))
p[0] = p[2]
# Error recovery for Partial
@@ -275,7 +303,8 @@ class IDLParser(object):
def p_PartialDefinition(self, p):
"""PartialDefinition : PartialDictionary
- | PartialInterface"""
+ | PartialInterface
+ | Namespace"""
p[0] = p[1]
def p_PartialInterface(self, p):
@@ -324,11 +353,10 @@ class IDLParser(object):
p[0] = self.BuildError(p, 'Dictionary')
def p_DictionaryMembers(self, p):
- """DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
+ """DictionaryMembers : DictionaryMember DictionaryMembers
|"""
if len(p) > 1:
- p[2].AddChildren(p[1])
- p[0] = ListFromConcat(p[2], p[3])
+ p[0] = ListFromConcat(p[1], p[2])
# Error recovery for DictionaryMembers
def p_DictionaryMembersError(self, p):
@@ -336,19 +364,28 @@ class IDLParser(object):
p[0] = self.BuildError(p, 'DictionaryMembers')
def p_DictionaryMember(self, p):
- """DictionaryMember : Required Type identifier Default ';'"""
- p[0] = self.BuildNamed('Key', p, 3, ListFromConcat(p[1], p[2], p[4]))
-
- def p_Required(self, p):
- """Required : REQUIRED
- |"""
- if len(p) > 1:
- p[0] = self.BuildTrue('REQUIRED')
+ """DictionaryMember : ExtendedAttributeList REQUIRED TypeWithExtendedAttributes identifier Default ';'
+ | ExtendedAttributeList Type identifier Default ';'"""
+ if len(p) > 6:
+ p[2] = self.BuildTrue('REQUIRED')
+ p[0] = self.BuildNamed('Key', p, 4, ListFromConcat(p[2], p[3], p[5]))
+ p[0].AddChildren(p[1])
+ else:
+ applicable_to_types, non_applicable_to_types = \
+ DivideExtAttrsIntoApplicableAndNonApplicable(p[1])
+ if applicable_to_types:
+ attributes = self.BuildProduction('ExtAttributes', p, 1,
+ applicable_to_types)
+ p[2].AddChildren(attributes)
+ p[0] = self.BuildNamed('Key', p, 3, ListFromConcat(p[2], p[4]))
+ if non_applicable_to_types:
+ attributes = self.BuildProduction('ExtAttributes', p, 1,
+ non_applicable_to_types)
+ p[0].AddChildren(attributes)
def p_PartialDictionary(self, p):
"""PartialDictionary : DICTIONARY identifier '{' DictionaryMembers '}' ';'"""
- partial = self.BuildTrue('Partial')
- p[0] = self.BuildNamed('Dictionary', p, 2, ListFromConcat(p[4], partial))
+ p[0] = self.BuildNamed('Dictionary', p, 2, p[4])
# Error recovery for Partial Dictionary
def p_PartialDictionaryError(self, p):
@@ -413,8 +450,8 @@ class IDLParser(object):
p[0] = self.BuildNamed('Callback', p, 1, ListFromConcat(p[3], arguments))
def p_Typedef(self, p):
- """Typedef : TYPEDEF ExtendedAttributeList Type identifier ';'"""
- p[0] = self.BuildNamed('Typedef', p, 4, ListFromConcat(p[2], p[3]))
+ """Typedef : TYPEDEF TypeWithExtendedAttributes identifier ';'"""
+ p[0] = self.BuildNamed('Typedef', p, 3, p[2])
# Error recovery for Typedefs
def p_TypedefError(self, p):
@@ -578,7 +615,7 @@ class IDLParser(object):
p[0] = p[1]
def p_AttributeRest(self, p):
- """AttributeRest : ATTRIBUTE Type AttributeName ';'"""
+ """AttributeRest : ATTRIBUTE TypeWithExtendedAttributes AttributeName ';'"""
p[0] = self.BuildNamed('Attribute', p, 3, p[2])
def p_AttributeName(self, p):
@@ -635,7 +672,7 @@ class IDLParser(object):
if len(p) > 1:
p[0] = p[1]
else:
- p[0] = '_unnamed_'
+ p[0] = ''
def p_ArgumentList(self, p):
"""ArgumentList : Argument Arguments
@@ -660,19 +697,24 @@ class IDLParser(object):
p[0] = self.BuildError(p, 'Arguments')
def p_Argument(self, p):
- """Argument : ExtendedAttributeList OptionalOrRequiredArgument"""
- p[2].AddChildren(p[1])
- p[0] = p[2]
-
- def p_OptionalOrRequiredArgument(self, p):
- """OptionalOrRequiredArgument : OPTIONAL Type ArgumentName Default
- | Type Ellipsis ArgumentName"""
- if len(p) > 4:
- arg = self.BuildNamed('Argument', p, 3, ListFromConcat(p[2], p[4]))
- arg.AddChildren(self.BuildTrue('OPTIONAL'))
+ """Argument : ExtendedAttributeList OPTIONAL TypeWithExtendedAttributes ArgumentName Default
+ | ExtendedAttributeList Type Ellipsis ArgumentName"""
+ if len(p) > 5:
+ p[0] = self.BuildNamed('Argument', p, 4, ListFromConcat(p[3], p[5]))
+ p[0].AddChildren(self.BuildTrue('OPTIONAL'))
+ p[0].AddChildren(p[1])
else:
- arg = self.BuildNamed('Argument', p, 3, ListFromConcat(p[1], p[2]))
- p[0] = arg
+ applicable_to_types, non_applicable_to_types = \
+ DivideExtAttrsIntoApplicableAndNonApplicable(p[1])
+ if applicable_to_types:
+ attributes = self.BuildProduction('ExtAttributes', p, 1,
+ applicable_to_types)
+ p[2].AddChildren(attributes)
+ p[0] = self.BuildNamed('Argument', p, 4, ListFromConcat(p[2], p[3]))
+ if non_applicable_to_types:
+ attributes = self.BuildProduction('ExtAttributes', p, 1,
+ non_applicable_to_types)
+ p[0].AddChildren(attributes)
def p_ArgumentName(self, p):
"""ArgumentName : ArgumentNameKeyword
@@ -687,12 +729,12 @@ class IDLParser(object):
p[0].AddChildren(self.BuildTrue('ELLIPSIS'))
def p_Iterable(self, p):
- """Iterable : ITERABLE '<' Type OptionalType '>' ';'"""
+ """Iterable : ITERABLE '<' TypeWithExtendedAttributes OptionalType '>' ';'"""
childlist = ListFromConcat(p[3], p[4])
p[0] = self.BuildProduction('Iterable', p, 2, childlist)
def p_OptionalType(self, p):
- """OptionalType : ',' Type
+ """OptionalType : ',' TypeWithExtendedAttributes
|"""
if len(p) > 1:
p[0] = p[2]
@@ -706,29 +748,61 @@ class IDLParser(object):
p[0] = p[1]
def p_MaplikeRest(self, p):
- """MaplikeRest : MAPLIKE '<' Type ',' Type '>' ';'"""
+ """MaplikeRest : MAPLIKE '<' TypeWithExtendedAttributes ',' TypeWithExtendedAttributes '>' ';'"""
childlist = ListFromConcat(p[3], p[5])
p[0] = self.BuildProduction('Maplike', p, 2, childlist)
def p_SetlikeRest(self, p):
- """SetlikeRest : SETLIKE '<' Type '>' ';'"""
+ """SetlikeRest : SETLIKE '<' TypeWithExtendedAttributes '>' ';'"""
p[0] = self.BuildProduction('Setlike', p, 2, p[3])
+ def p_Namespace(self, p):
+ """Namespace : NAMESPACE identifier '{' NamespaceMembers '}' ';'"""
+ p[0] = self.BuildNamed('Namespace', p, 2, p[4])
+
+ # Error recovery for namespace.
+ def p_NamespaceError(self, p):
+ """Namespace : NAMESPACE identifier '{' error"""
+ p[0] = self.BuildError(p, 'Namespace')
+
+ def p_NamespaceMembers(self, p):
+ """NamespaceMembers : NamespaceMember NamespaceMembers
+ | """
+ if len(p) > 1:
+ p[0] = ListFromConcat(p[1], p[2])
+
+ # Error recovery for NamespaceMembers
+ def p_NamespaceMembersError(self, p):
+ """NamespaceMembers : ExtendedAttributeList error"""
+ p[0] = self.BuildError(p, 'NamespaceMembers')
+
+ def p_NamespaceMember(self, p):
+ """NamespaceMember : ExtendedAttributeList ReturnType OperationRest
+ | ExtendedAttributeList READONLY AttributeRest"""
+ if p[2] != 'readonly':
+ applicable_to_types, non_applicable_to_types = \
+ DivideExtAttrsIntoApplicableAndNonApplicable(p[1])
+ if applicable_to_types:
+ attributes = self.BuildProduction('ExtAttributes', p, 1,
+ applicable_to_types)
+ p[2].AddChildren(attributes)
+ p[3].AddChildren(p[2])
+ if non_applicable_to_types:
+ attributes = self.BuildProduction('ExtAttributes', p, 1,
+ non_applicable_to_types)
+ p[3].AddChildren(attributes)
+ else:
+ p[3].AddChildren(self.BuildTrue('READONLY'))
+ p[3].AddChildren(p[1])
+ p[0] = p[3]
+
# This rule has custom additions (i.e. SpecialComments).
def p_ExtendedAttributeList(self, p):
- """ExtendedAttributeList : SpecialComments '[' ExtendedAttribute ExtendedAttributes ']'
- | '[' ExtendedAttribute ExtendedAttributes ']'
- | SpecialComments
+ """ExtendedAttributeList : '[' ExtendedAttribute ExtendedAttributes ']'
| """
- if len(p) > 5:
- items = ListFromConcat(p[3], p[4])
- attribs = self.BuildProduction('ExtAttributes', p, 2, items)
- p[0] = ListFromConcat(p[1], attribs)
- elif len(p) > 4:
+ if len(p) > 4:
items = ListFromConcat(p[2], p[3])
p[0] = self.BuildProduction('ExtAttributes', p, 1, items)
- elif len(p) > 1:
- p[0] = p[1]
# Error recovery for ExtendedAttributeList
def p_ExtendedAttributeListError(self, p):
@@ -774,6 +848,7 @@ class IDLParser(object):
| IMPLEMENTS
| INHERIT
| LEGACYCALLER
+ | NAMESPACE
| PARTIAL
| SERIALIZER
| SETTER
@@ -791,6 +866,15 @@ class IDLParser(object):
else:
p[0] = self.BuildProduction('Type', p, 1, ListFromConcat(p[1], p[2]))
+ def p_TypeWithExtendedAttributes(self, p):
+ """ TypeWithExtendedAttributes : ExtendedAttributeList SingleType
+ | ExtendedAttributeList UnionType Null"""
+ if len(p) < 4:
+ p[0] = self.BuildProduction('Type', p, 2, p[2])
+ else:
+ p[0] = self.BuildProduction('Type', p, 2, ListFromConcat(p[2], p[3]))
+ p[0].AddChildren(p[1])
+
def p_SingleType(self, p):
"""SingleType : NonAnyType
| ANY"""
@@ -826,8 +910,8 @@ class IDLParser(object):
"""NonAnyType : PrimitiveType Null
| PromiseType Null
| identifier Null
- | SEQUENCE '<' Type '>' Null
- | FROZENARRAY '<' Type '>' Null
+ | SEQUENCE '<' TypeWithExtendedAttributes '>' Null
+ | FROZENARRAY '<' TypeWithExtendedAttributes '>' Null
| RecordType Null"""
if len(p) == 3:
if type(p[1]) == str:
@@ -1013,7 +1097,7 @@ class IDLParser(object):
p[0] = self.BuildNamed('StringType', p, 1)
def p_RecordType(self, p):
- """RecordType : RECORD '<' StringType ',' Type '>'"""
+ """RecordType : RECORD '<' StringType ',' TypeWithExtendedAttributes '>'"""
p[0] = self.BuildProduction('Record', p, 2, ListFromConcat(p[3], p[5]))
# Error recovery for RecordType.
@@ -1132,8 +1216,8 @@ class IDLParser(object):
def BuildError(self, p, prod):
self._parse_errors += 1
name = self.BuildAttribute('NAME', self._last_error_msg)
- line = self.BuildAttribute('LINE', self._last_error_lineno)
- pos = self.BuildAttribute('POS', self._last_error_pos)
+ line = self.BuildAttribute('LINENO', self._last_error_lineno)
+ pos = self.BuildAttribute('POSITION', self._last_error_pos)
prod = self.BuildAttribute('PROD', prod)
node = self.BuildProduction('Error', p, 1,
@@ -1215,7 +1299,7 @@ def main(argv):
ast = IDLNode('AST', '__AST__', 0, 0, nodes)
- print '\n'.join(ast.Tree(accept_props=['PROD']))
+ print '\n'.join(ast.Tree())
if errors:
print '\nFound %d errors.\n' % errors
diff --git a/chromium/tools/idl_parser/idl_parser_test.py b/chromium/tools/idl_parser/idl_parser_test.py
index cd0a70927fe..6aeabd44927 100755
--- a/chromium/tools/idl_parser/idl_parser_test.py
+++ b/chromium/tools/idl_parser/idl_parser_test.py
@@ -4,6 +4,7 @@
# found in the LICENSE file.
import glob
+import os
import unittest
from idl_lexer import IDLLexer
@@ -20,7 +21,9 @@ class WebIDLParser(unittest.TestCase):
def setUp(self):
self.parser = IDLParser(IDLLexer(), mute_error=True)
- self.filenames = glob.glob('test_parser/*_web.idl')
+ test_dir = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), 'test_parser'))
+ self.filenames = glob.glob('%s/*_web.idl' % test_dir)
def _TestNode(self, node):
comments = node.GetListOf('SpecialComment')
@@ -397,4 +400,4 @@ class TestDefaultValue(unittest.TestCase):
self._CheckDefaultValue(default_value, 'NULL', 'NULL')
if __name__ == '__main__':
- unittest.main(verbosity=2) \ No newline at end of file
+ unittest.main(verbosity=2)
diff --git a/chromium/tools/idl_parser/test_parser/dictionary_web.idl b/chromium/tools/idl_parser/test_parser/dictionary_web.idl
index 62915002f60..bcf91e0ae5d 100644
--- a/chromium/tools/idl_parser/test_parser/dictionary_web.idl
+++ b/chromium/tools/idl_parser/test_parser/dictionary_web.idl
@@ -35,6 +35,7 @@ dictionary MyDictInherit : Foo {};
/** TREE
*Dictionary(MyDictPartial)
+ * PARTIAL: True
*/
partial dictionary MyDictPartial { };
@@ -64,6 +65,7 @@ dictionary MyDictBig {
/** TREE
*Dictionary(MyDictRequired)
* Key(setLong)
+ * REQUIRED: True
* Type()
* PrimitiveType(long)
*/
@@ -81,10 +83,18 @@ dictionary {
* Key(mandatory)
* Type()
* StringType(DOMString)
+ * Key(sequenceOfLongWithClamp)
+ * Type()
+ * Sequence()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
* Error(Unexpected keyword "optional" after ">".)
*/
dictionary MyDictionaryInvalidOptional {
DOMString mandatory;
+ sequence<[Clamp] long> sequenceOfLongWithClamp;
sequence<DOMString> optional;
};
@@ -97,6 +107,7 @@ dictionary ForParent NoColon {
*Dictionary(MyDictNull)
* Key(setString)
* Type()
+ * NULLABLE: True
* StringType(DOMString)
* Default() = "NULL"
*/
@@ -108,3 +119,189 @@ dictionary MyDictNull {
dictionary MyDictUnexpectedAttribute {
attribute DOMString foo = "";
};
+
+/** TREE
+ *Dictionary(MyDictRequiredClampNotAppliedToType)
+ * Key(setLong)
+ * REQUIRED: True
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+dictionary MyDictRequiredClampNotAppliedToType {
+ [Clamp] required long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDictRequired)
+ * Key(setLong)
+ * REQUIRED: True
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr) = "foo"
+ */
+dictionary MyDictRequired {
+ [XAttr = foo] required long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDictRequired)
+ * Key(setLong)
+ * REQUIRED: True
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr1)
+ * ExtAttribute(XAttr2)
+ */
+dictionary MyDictRequired {
+ [XAttr1, XAttr2] required long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDictRequired)
+ * Key(setLong)
+ * REQUIRED: True
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(EnforceRange)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+dictionary MyDictRequired {
+ [Clamp] required [EnforceRange] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDictRequired)
+ * Key(setLong)
+ * REQUIRED: True
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr) = "foo"
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+dictionary MyDictRequired {
+ [Clamp] required [XAttr = foo] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDictRequired)
+ * Key(setLong)
+ * REQUIRED: True
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr1)
+ * ExtAttribute(XAttr2)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+dictionary MyDictRequired {
+ [Clamp] required [XAttr1, XAttr2] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+dictionary MyDict {
+ [Clamp] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr)
+ */
+dictionary MyDict {
+ [XAttr] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr) = "foo"
+ */
+dictionary MyDict {
+ [XAttr = foo] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * ExtAttributes()
+ * ExtAttribute(XAttr)
+ */
+dictionary MyDict {
+ [XAttr, Clamp] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(foo)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * Default() = "2"
+ */
+dictionary MyDict {
+ [Clamp] long foo = 2;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setString)
+ * Type()
+ * StringType(DOMString)
+ * ExtAttributes()
+ * ExtAttribute(XAttr)
+ */
+dictionary MyDict {
+ [XAttr] DOMString setString;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr) = "bar"
+ */
+dictionary MyDict {
+ [XAttr = bar] long setLong;
+};
+
+/** TREE
+ *Dictionary(MyDict)
+ * Key(setLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(XAttr1)
+ * ExtAttribute(XAttr2)
+ */
+dictionary MyDict {
+ [XAttr1, XAttr2] long setLong;
+};
diff --git a/chromium/tools/idl_parser/test_parser/interface_web.idl b/chromium/tools/idl_parser/test_parser/interface_web.idl
index e80a85d6fae..589a47c29df 100644
--- a/chromium/tools/idl_parser/test_parser/interface_web.idl
+++ b/chromium/tools/idl_parser/test_parser/interface_web.idl
@@ -35,6 +35,7 @@ interface MyIFaceInherit : Foo {};
/** TREE
*Interface(MyIFacePartial)
+ * PARTIAL: True
*/
partial interface MyIFacePartial { };
@@ -98,6 +99,7 @@ interface MyIFaceWrongRecordKeyType {
*Interface(MyIFaceBig)
* Const(setString)
* StringType(DOMString)
+ * NULLABLE: True
* Value() = "NULL"
*/
interface MyIFaceBig {
@@ -109,6 +111,7 @@ interface MyIFaceBig {
* Operation(foo)
* Arguments()
* Argument(arg)
+ * OPTIONAL: True
* Type()
* Sequence()
* Type()
@@ -131,6 +134,8 @@ interface MyIfaceEmptySequenceDefalutValue {
* StringType(DOMString)
* Type()
* PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(EnforceRange)
* Type()
* PrimitiveType(void)
* Operation(bar)
@@ -148,7 +153,7 @@ interface MyIfaceEmptySequenceDefalutValue {
* PrimitiveType(double)
*/
interface MyIfaceWithRecords {
- void foo(record<DOMString, long> arg);
+ void foo(record<DOMString, [EnforceRange] long> arg);
double bar(int arg1, record<ByteString, float> arg2);
};
@@ -156,6 +161,7 @@ interface MyIfaceWithRecords {
*Interface(MyIFaceBig2)
* Const(nullValue)
* StringType(DOMString)
+ * NULLABLE: True
* Value() = "NULL"
* Const(longValue)
* PrimitiveType(long)
@@ -167,9 +173,11 @@ interface MyIfaceWithRecords {
* Type()
* StringType(DOMString)
* Attribute(readOnlyString)
+ * READONLY: True
* Type()
* StringType(DOMString)
* Attribute(staticString)
+ * STATIC: True
* Type()
* StringType(DOMString)
* Operation(myFunction)
@@ -180,6 +188,7 @@ interface MyIfaceWithRecords {
* Type()
* PrimitiveType(void)
* Operation(staticFunction)
+ * STATIC: True
* Arguments()
* Argument(myLong)
* Type()
@@ -201,13 +210,16 @@ interface MyIFaceBig2 {
/** TREE
*Interface(MyIFaceSpecials)
* Operation(set)
+ * CREATOR: True
+ * SETTER: True
* Arguments()
* Argument(property)
* Type()
* StringType(DOMString)
* Type()
* PrimitiveType(void)
- * Operation(_unnamed_)
+ * Operation()
+ * GETTER: True
* Arguments()
* Argument(property)
* Type()
@@ -224,7 +236,7 @@ interface MyIFaceSpecials {
*Interface(MyIFaceStringifiers)
* Stringifier()
* Stringifier()
- * Operation(_unnamed_)
+ * Operation()
* Arguments()
* Type()
* StringType(DOMString)
@@ -308,15 +320,45 @@ interface MyIfacePromise {
* PrimitiveType(double)
* Type()
* StringType(DOMString)
+ * Iterable()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * Iterable()
+ * Type()
+ * StringType(DOMString)
+ * ExtAttributes()
+ * ExtAttribute(TreatNullAs) = "EmptyString"
+ * Iterable()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * ExtAttribute(XAttr)
+ * Iterable()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * Type()
+ * PrimitiveType(long long)
+ * ExtAttributes()
+ * ExtAttribute(EnforceRange)
*/
interface MyIfaceIterable {
iterable<long>;
iterable<double, DOMString>;
+ iterable<[Clamp] long>;
+ iterable<[TreatNullAs=EmptyString] DOMString>;
+ iterable<[Clamp, XAttr] long>;
+ iterable<[Clamp] long, [EnforceRange] long long>;
};
/** TREE
*Interface(MyIfaceMaplike)
* Maplike()
+ * READONLY: True
* Type()
* PrimitiveType(long)
* Type()
@@ -326,24 +368,41 @@ interface MyIfaceIterable {
* PrimitiveType(double)
* Type()
* PrimitiveType(boolean)
+ * Maplike()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * Type()
+ * StringType(DOMString)
+ * ExtAttributes()
+ * ExtAttribute(XAttr)
*/
interface MyIfaceMaplike {
readonly maplike<long, DOMString>;
maplike<double, boolean>;
+ maplike<[Clamp] long, [XAttr] DOMString>;
};
/** TREE
*Interface(MyIfaceSetlike)
* Setlike()
+ * READONLY: True
* Type()
* PrimitiveType(long)
* Setlike()
* Type()
* PrimitiveType(double)
+ * Setlike()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(EnforceRange)
*/
interface MyIfaceSetlike {
readonly setlike<long>;
setlike<double>;
+ setlike<[EnforceRange] long>;
};
/** TREE
@@ -355,26 +414,38 @@ interface MyIfaceSetlike {
* Type()
* Any()
* Serializer()
+ * ATTRIBUTE: name
* Serializer()
* Map()
* Serializer()
* Map()
+ * GETTER: True
* Serializer()
* Map()
+ * ATTRIBUTE: True
* Serializer()
* Map()
+ * ATTRIBUTE: True
+ * INHERIT: True
* Serializer()
* Map()
+ * INHERIT: True
+ * ATTRIBUTES: None
* Serializer()
* Map()
+ * INHERIT: True
+ * ATTRIBUTES: ['name1', 'name2']
* Serializer()
* Map()
+ * ATTRIBUTES: ['name1', 'name2']
* Serializer()
* List()
* Serializer()
* List()
+ * GETTER: True
* Serializer()
* List()
+ * ATTRIBUTES: ['name1', 'name2']
*/
interface MyIfaceSerializer {
serializer;
@@ -395,13 +466,23 @@ interface MyIfaceSerializer {
/** TREE
*Interface(MyIfaceFrozenArray)
* Attribute(foo)
+ * READONLY: True
+ * Type()
+ * FrozenArray()
+ * Type()
+ * StringType(DOMString)
+ * Attribute(bar)
+ * READONLY: True
* Type()
* FrozenArray()
* Type()
* StringType(DOMString)
+ * ExtAttributes()
+ * ExtAttribute(TreatNullAs) = "EmptyString"
*/
interface MyIfaceFrozenArray {
readonly attribute FrozenArray<DOMString> foo;
+ readonly attribute FrozenArray<[TreatNullAs=EmptyString] DOMString> bar;
};
/** TREE
@@ -416,4 +497,128 @@ interface MyIfaceFrozenArray {
*/
interface MyIfaceUnion {
attribute (DOMString or long) foo;
-}; \ No newline at end of file
+};
+
+/** TREE
+ *Interface(MyIfaceAttributeRestClamp)
+ * Attribute(myLong)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+interface MyIfaceAttributeRestClamp {
+ attribute [Clamp] long myLong;
+};
+
+/** TREE
+ *Interface(MyIFaceArgumentWithAnnotatedType1)
+ * Operation(myFunction)
+ * Arguments()
+ * Argument(myLong)
+ * OPTIONAL: True
+ * Type()
+ * PrimitiveType(long long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * Type()
+ * PrimitiveType(void)
+ */
+interface MyIFaceArgumentWithAnnotatedType1 {
+ void myFunction(optional [Clamp] long long myLong);
+};
+
+/** TREE
+ *Interface(MyIFaceArgumentWithAnnotatedType2)
+ * Operation(voidMethodTestArgumentWithExtAttribute1)
+ * Arguments()
+ * Argument(myLong)
+ * Type()
+ * PrimitiveType(long long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ * ExtAttributes()
+ * ExtAttribute(XAttr)
+ * Type()
+ * PrimitiveType(void)
+ * Operation(voidMethodTestArgumentWithExtAttribute2)
+ * Arguments()
+ * Argument(longArg)
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(EnforceRange)
+ * Type()
+ * PrimitiveType(void)
+ */
+interface MyIFaceArgumentWithAnnotatedType2 {
+ void voidMethodTestArgumentWithExtAttribute1([Clamp, XAttr] long long myLong);
+ void voidMethodTestArgumentWithExtAttribute2([EnforceRange] long longArg);
+};
+
+/** TREE
+ *Interface(InterfaceConstructors)
+ * ExtAttributes()
+ * ExtAttribute(Constructor)
+ * ExtAttribute(Constructor)
+ * Arguments()
+ * Argument(doubleArg)
+ * Type()
+ * PrimitiveType(double)
+ * ExtAttribute(CustomConstructor)
+ * ExtAttribute(CustomConstructor)
+ * Arguments()
+ * Argument(doubleArg)
+ * Type()
+ * PrimitiveType(double)
+ * ExtAttribute(NamedConstructor) = "Audio"
+ * ExtAttribute(NamedConstructor)
+ * Call(Audio)
+ * Arguments()
+ * Argument(src)
+ * Type()
+ * StringType(DOMString)
+ */
+[
+ Constructor,
+ Constructor(double doubleArg),
+ CustomConstructor,
+ CustomConstructor(double doubleArg),
+ NamedConstructor=Audio,
+ NamedConstructor=Audio(DOMString src)
+] interface InterfaceConstructors { };
+
+/** TREE
+ *Interface(InterfaceExposed)
+ * ExtAttributes()
+ * ExtAttribute(Exposed) = "Window"
+ * ExtAttribute(Exposed) = "['Window', 'Worker']"
+ * ExtAttribute(Exposed)
+ * Arguments()
+ * Argument(Feature1)
+ * Type()
+ * Typeref(Window)
+ * ExtAttribute(Exposed)
+ * Arguments()
+ * Argument(Feature1)
+ * Type()
+ * Typeref(Window)
+ * Argument(Feature2)
+ * Type()
+ * Typeref(Worker)
+ */
+[
+ Exposed=Window,
+ Exposed=(Window, Worker),
+ Exposed(Window Feature1),
+ Exposed(Window Feature1, Worker Feature2)
+] interface InterfaceExposed { };
+
+/** TREE
+ *Interface(InterfaceExposedError)
+ * ExtAttributes()
+ * ExtAttribute(Exposed)
+ * Arguments()
+ * Error(Unexpected ,.)
+ */
+[ Exposed(Window, Worker) ] interface InterfaceExposedError { };
diff --git a/chromium/tools/idl_parser/test_parser/namespace_web.idl b/chromium/tools/idl_parser/test_parser/namespace_web.idl
new file mode 100644
index 00000000000..06e42a46adf
--- /dev/null
+++ b/chromium/tools/idl_parser/test_parser/namespace_web.idl
@@ -0,0 +1,150 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* Test Namespace productions
+
+Run with --test to generate an AST and verify that all comments accurately
+reflect the state of the Nodes.
+
+TREE
+Type(Name)
+ Type(Name)
+ Type(Name)
+ Type(Name)
+ ...
+This comment signals that a tree of nodes matching the BUILD comment
+symatics should exist. This is an exact match.
+*/
+
+
+/** TREE
+ *Namespace(MyNamespace)
+ */
+namespace MyNamespace { };
+
+/** TREE
+ *Namespace(MyNamespace2)
+ * Operation(fooLong)
+ * Arguments()
+ * Type()
+ * PrimitiveType(long)
+ * Operation(voidArgLong)
+ * Arguments()
+ * Argument(arg)
+ * Type()
+ * PrimitiveType(long)
+ * Type()
+ * PrimitiveType(void)
+ */
+namespace MyNamespace2 {
+ long fooLong();
+ void voidArgLong(long arg);
+};
+
+/** TREE
+ *Namespace(MyNamespaceMissingArgument)
+ * Operation(foo)
+ * Arguments()
+ * Argument(arg)
+ * Type()
+ * StringType(DOMString)
+ * Error(Missing argument.)
+ * Type()
+ * PrimitiveType(void)
+ */
+namespace MyNamespaceMissingArgument {
+ void foo(DOMString arg, );
+};
+
+/** TREE
+ *Namespace(VectorUtils)
+ * Attribute(unit)
+ * READONLY: True
+ * Type()
+ * Typeref(Vector)
+ * Operation(dotProduct)
+ * Arguments()
+ * Argument(x)
+ * Type()
+ * Typeref(Vector)
+ * Argument(y)
+ * Type()
+ * Typeref(Vector)
+ * Type()
+ * PrimitiveType(double)
+ * Operation(crossProduct)
+ * Arguments()
+ * Argument(x)
+ * Type()
+ * Typeref(Vector)
+ * Argument(y)
+ * Type()
+ * Typeref(Vector)
+ * Type()
+ * Typeref(Vector)
+ */
+namespace VectorUtils {
+ readonly attribute Vector unit;
+ double dotProduct(Vector x, Vector y);
+ Vector crossProduct(Vector x, Vector y);
+};
+
+/**TREE
+ *Namespace(ErrorOnlyExtAttrs)
+ * Error(Unexpected ";" after "]".)
+ */
+namespace ErrorOnlyExtAttrs {
+ [Clamp];
+};
+
+/** TREE
+ *Error(Unexpected keyword "attribute" after "{".)
+ */
+namespace ErrorNonReadonly {
+ attribute Vector unit2;
+};
+
+/** TREE
+ *Error(Unexpected ";" after "{".)
+ */
+namespace NameSpaceError {
+ ;
+
+/**TREE
+ *Namespace(PartialNamespace)
+ * PARTIAL: True
+ * Operation(fooLong)
+ * Arguments()
+ * Type()
+ * PrimitiveType(long)
+ */
+partial namespace PartialNamespace {
+ long fooLong();
+};
+
+/** TREE
+ *Namespace(NamespaceWithExtAttrs)
+ * Operation(fooLong)
+ * Arguments()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Replaceable)
+ */
+[Replaceable] namespace NamespaceWithExtAttrs {
+ long fooLong();
+};
+
+/** TREE
+ *Namespace(NamespaceAnnotatedTypeMember)
+ * Operation(fooLong)
+ * Arguments()
+ * Type()
+ * PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(Clamp)
+ */
+namespace NamespaceAnnotatedTypeMember {
+ [Clamp] long fooLong();
+};
diff --git a/chromium/tools/idl_parser/test_parser/typedef_web.idl b/chromium/tools/idl_parser/test_parser/typedef_web.idl
index b100abcd61d..7ba169fc8f9 100644
--- a/chromium/tools/idl_parser/test_parser/typedef_web.idl
+++ b/chromium/tools/idl_parser/test_parser/typedef_web.idl
@@ -31,10 +31,10 @@ typedef long MyLong;
/** TREE
*Typedef(MyLong)
- * ExtAttributes()
- * ExtAttribute(foo)
* Type()
* PrimitiveType(long)
+ * ExtAttributes()
+ * ExtAttribute(foo)
*/
typedef [foo] long MyLong;
@@ -109,6 +109,7 @@ typedef float MyFloat;
*Typedef(MyUFloat)
* Type()
* PrimitiveType(float)
+ * UNRESTRICTED: True
*/
typedef unrestricted float MyUFloat;
@@ -123,6 +124,7 @@ typedef double MyDouble;
*Typedef(MyUDouble)
* Type()
* PrimitiveType(double)
+ * UNRESTRICTED: True
*/
typedef unrestricted double MyUDouble;
diff --git a/chromium/tools/ipc_fuzzer/fuzzer/fuzzer.cc b/chromium/tools/ipc_fuzzer/fuzzer/fuzzer.cc
index e901db85191..6d9364066ec 100644
--- a/chromium/tools/ipc_fuzzer/fuzzer/fuzzer.cc
+++ b/chromium/tools/ipc_fuzzer/fuzzer/fuzzer.cc
@@ -642,8 +642,8 @@ struct FuzzTraits<base::DictionaryValue> {
};
template <>
-struct FuzzTraits<cc::CompositorFrame> {
- static bool Fuzz(cc::CompositorFrame* p, Fuzzer* fuzzer) {
+struct FuzzTraits<viz::CompositorFrame> {
+ static bool Fuzz(viz::CompositorFrame* p, Fuzzer* fuzzer) {
// TODO(mbarbella): Support mutation.
if (!fuzzer->ShouldGenerate())
return true;
@@ -675,16 +675,16 @@ struct FuzzTraits<cc::ListContainer<A>> {
};
template <>
-struct FuzzTraits<cc::QuadList> {
- static bool Fuzz(cc::QuadList* p, Fuzzer* fuzzer) {
+struct FuzzTraits<viz::QuadList> {
+ static bool Fuzz(viz::QuadList* p, Fuzzer* fuzzer) {
// TODO(mbarbella): This should actually do something.
return true;
}
};
template <>
-struct FuzzTraits<cc::RenderPass> {
- static bool Fuzz(cc::RenderPass* p, Fuzzer* fuzzer) {
+struct FuzzTraits<viz::RenderPass> {
+ static bool Fuzz(viz::RenderPass* p, Fuzzer* fuzzer) {
if (!FuzzParam(&p->id, fuzzer))
return false;
if (!FuzzParam(&p->output_rect, fuzzer))
@@ -705,8 +705,8 @@ struct FuzzTraits<cc::RenderPass> {
};
template <>
-struct FuzzTraits<cc::RenderPassList> {
- static bool Fuzz(cc::RenderPassList* p, Fuzzer* fuzzer) {
+struct FuzzTraits<viz::RenderPassList> {
+ static bool Fuzz(viz::RenderPassList* p, Fuzzer* fuzzer) {
if (!fuzzer->ShouldGenerate()) {
for (size_t i = 0; i < p->size(); ++i) {
if (!FuzzParam(p->at(i).get(), fuzzer))
@@ -717,7 +717,7 @@ struct FuzzTraits<cc::RenderPassList> {
size_t count = RandElementCount();
for (size_t i = 0; i < count; ++i) {
- std::unique_ptr<cc::RenderPass> render_pass = cc::RenderPass::Create();
+ std::unique_ptr<viz::RenderPass> render_pass = viz::RenderPass::Create();
if (!FuzzParam(render_pass.get(), fuzzer))
return false;
p->push_back(std::move(render_pass));
diff --git a/chromium/tools/ipc_fuzzer/message_lib/BUILD.gn b/chromium/tools/ipc_fuzzer/message_lib/BUILD.gn
index daf9677004b..ce870ac4ef9 100644
--- a/chromium/tools/ipc_fuzzer/message_lib/BUILD.gn
+++ b/chromium/tools/ipc_fuzzer/message_lib/BUILD.gn
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//build/config/features.gni")
+import("//components/nacl/features.gni")
import("//remoting/remoting_enable.gni")
static_library("ipc_message_lib") {
@@ -12,6 +12,7 @@ static_library("ipc_message_lib") {
"//chrome/common",
"//chrome/common/safe_browsing:proto",
"//components/guest_view/common",
+ "//components/nacl/common:features",
"//components/network_hints/common",
"//components/safe_browsing/common",
"//components/spellcheck/common",
diff --git a/chromium/tools/ipc_fuzzer/message_lib/all_messages.h b/chromium/tools/ipc_fuzzer/message_lib/all_messages.h
index c0f22b3da07..9b8f4e824c0 100644
--- a/chromium/tools/ipc_fuzzer/message_lib/all_messages.h
+++ b/chromium/tools/ipc_fuzzer/message_lib/all_messages.h
@@ -14,8 +14,10 @@
#undef CONTENT_COMMON_FRAME_PARAM_MACROS_H_
#undef CONTENT_PUBLIC_COMMON_COMMON_PARAM_TRAITS_MACROS_H_
+#include "components/nacl/common/features.h"
+
#include "chrome/common/all_messages.h"
-#if !defined(DISABLE_NACL)
+#if BUILDFLAG(ENABLE_NACL)
#include "components/nacl/common/nacl_host_messages.h"
#endif
#include "components/guest_view/common/guest_view_message_generator.h"
diff --git a/chromium/tools/ipc_fuzzer/message_tools/BUILD.gn b/chromium/tools/ipc_fuzzer/message_tools/BUILD.gn
index dad44ffd65a..a1fcf030237 100644
--- a/chromium/tools/ipc_fuzzer/message_tools/BUILD.gn
+++ b/chromium/tools/ipc_fuzzer/message_tools/BUILD.gn
@@ -17,6 +17,7 @@ executable("ipc_message_list") {
configs += [ "//tools/ipc_fuzzer:ipc_fuzzer_tool_config" ]
deps = [
"//chrome/common/safe_browsing:proto",
+ "//components/nacl/common:features",
"//tools/ipc_fuzzer/message_lib:ipc_message_lib",
]
sources = [
diff --git a/chromium/tools/ipc_fuzzer/message_tools/message_list.cc b/chromium/tools/ipc_fuzzer/message_tools/message_list.cc
index 2afcb162f89..5e701f03715 100644
--- a/chromium/tools/ipc_fuzzer/message_tools/message_list.cc
+++ b/chromium/tools/ipc_fuzzer/message_tools/message_list.cc
@@ -56,9 +56,9 @@ static bool check_msgtable() {
exemptions.push_back(CastMediaMsgStart); // Reserved for chromecast.
exemptions.push_back(IPCTestMsgStart);
-#if defined(DISABLE_NACL)
+#if !BUILDFLAG(ENABLE_NACL)
exemptions.push_back(NaClMsgStart);
-#endif // defined(DISABLE_NACL)
+#endif // !BUILDFLAG(ENABLE_NACL)
#if !BUILDFLAG(ENABLE_WEBRTC)
exemptions.push_back(WebRtcLoggingMsgStart);
@@ -83,10 +83,6 @@ static bool check_msgtable() {
exemptions.push_back(OzoneGpuMsgStart);
#endif // !defined(USE_OZONE)
-#if !defined(OS_WIN) && !defined(OS_MACOSX)
- exemptions.push_back(ChromeUtilityExtensionsMsgStart);
-#endif
-
#if !defined(OS_WIN)
exemptions.push_back(DWriteFontProxyMsgStart);
#endif
diff --git a/chromium/tools/json_comment_eater/json_comment_eater_test.py b/chromium/tools/json_comment_eater/json_comment_eater_test.py
index 5a230eb2802..ae034749e16 100755
--- a/chromium/tools/json_comment_eater/json_comment_eater_test.py
+++ b/chromium/tools/json_comment_eater/json_comment_eater_test.py
@@ -4,6 +4,7 @@
# found in the LICENSE file.
from json_comment_eater import Nom
+import os
import unittest
class JsonCommentEaterTest(unittest.TestCase):
@@ -13,7 +14,8 @@ class JsonCommentEaterTest(unittest.TestCase):
contents as a tuple in that order.
'''
def read(file_name):
- with open(file_name, 'r') as f:
+ file_path = os.path.join(os.path.dirname(__file__), file_name)
+ with open(file_path, 'r') as f:
return f.read()
return [read(pattern % test_name)
for pattern in ('%s.json', '%s_expected.json')]
diff --git a/chromium/tools/json_schema_compiler/cc_generator.py b/chromium/tools/json_schema_compiler/cc_generator.py
index b434df550f6..63016eba085 100644
--- a/chromium/tools/json_schema_compiler/cc_generator.py
+++ b/chromium/tools/json_schema_compiler/cc_generator.py
@@ -43,7 +43,6 @@ class _Generator(object):
.Append()
.Append(self._util_cc_helper.GetIncludePath())
.Append('#include "base/logging.h"')
- .Append('#include "base/memory/ptr_util.h"')
.Append('#include "base/strings/string_number_conversions.h"')
.Append('#include "base/strings/utf_string_conversions.h"')
.Append('#include "base/values.h"')
@@ -632,15 +631,12 @@ class _Generator(object):
maybe_namespace = ''
if type_.property_type == PropertyType.REF:
maybe_namespace = '%s::' % underlying_type.namespace.unix_name
- return 'base::MakeUnique<base::Value>(%sToString(%s))' % (
+ return 'std::make_unique<base::Value>(%sToString(%s))' % (
maybe_namespace, var)
elif underlying_type.property_type == PropertyType.BINARY:
if is_ptr:
- vardot = var + '->'
- else:
- vardot = var + '.'
- return ('base::Value::CreateWithCopiedBuffer('
- '%sdata(), %ssize())' % (vardot, vardot))
+ var = '*%s' % var
+ return 'std::make_unique<base::Value>(%s)' % var
elif underlying_type.property_type == PropertyType.ARRAY:
return '%s' % self._util_cc_helper.CreateValueFromArray(
var,
@@ -649,9 +645,9 @@ class _Generator(object):
if is_ptr:
var = '*%s' % var
if underlying_type.property_type == PropertyType.STRING:
- return 'base::MakeUnique<base::Value>(%s)' % var
+ return 'std::make_unique<base::Value>(%s)' % var
else:
- return 'base::MakeUnique<base::Value>(%s)' % var
+ return 'std::make_unique<base::Value>(%s)' % var
else:
raise NotImplementedError('Conversion of %s to base::Value not '
'implemented' % repr(type_.type_))
@@ -1015,7 +1011,7 @@ class _Generator(object):
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
- c.Sblock('std::string %sToString(%s enum_param) {' %
+ c.Sblock('const char* %sToString(%s enum_param) {' %
(maybe_namespace, classname))
c.Sblock('switch (enum_param) {')
for enum_value in self._type_helper.FollowRef(type_).enum_values:
diff --git a/chromium/tools/json_schema_compiler/cpp_bundle_generator.py b/chromium/tools/json_schema_compiler/cpp_bundle_generator.py
index a6531b08ebc..620b3f19151 100644
--- a/chromium/tools/json_schema_compiler/cpp_bundle_generator.py
+++ b/chromium/tools/json_schema_compiler/cpp_bundle_generator.py
@@ -290,9 +290,6 @@ class _SchemasHGenerator(object):
def Generate(self, _): # namespace not relevant, this is a bundle
c = code.Code()
- c.Append('#include <map>')
- c.Append('#include <string>')
- c.Append()
c.Append('#include "base/strings/string_piece.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
@@ -301,10 +298,10 @@ class _SchemasHGenerator(object):
self._bundle._GenerateBundleClass('GeneratedSchemas'))
c.Sblock(' public:')
c.Append('// Determines if schema named |name| is generated.')
- c.Append('static bool IsGenerated(std::string name);')
+ c.Append('static bool IsGenerated(base::StringPiece name);')
c.Append()
c.Append('// Gets the API schema named |name|.')
- c.Append('static base::StringPiece Get(const std::string& name);')
+ c.Append('static base::StringPiece Get(base::StringPiece name);')
c.Eblock('};')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
@@ -332,8 +329,6 @@ class _SchemasCCGenerator(object):
c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
'generated_schemas.h')))
c.Append()
- c.Append('#include "base/lazy_instance.h"')
- c.Append()
c.Append('namespace {')
for api in self._bundle._api_defs:
namespace = self._bundle._model.namespaces[api.get('namespace')]
@@ -350,35 +345,37 @@ class _SchemasCCGenerator(object):
for i in xrange(0, len(json_content), max_length)]
c.Append('const char %s[] = "%s";' %
(_FormatNameAsConstant(namespace.name), '" "'.join(segments)))
- c.Append()
- c.Sblock('struct Static {')
- c.Sblock('Static() {')
- for api in self._bundle._api_defs:
- namespace = self._bundle._model.namespaces[api.get('namespace')]
- c.Append('schemas["%s"] = %s;' % (namespace.name,
- _FormatNameAsConstant(namespace.name)))
- c.Eblock('}')
- c.Append()
- c.Append('std::map<std::string, const char*> schemas;')
- c.Eblock('};')
- c.Append()
- c.Append('base::LazyInstance<Static>::DestructorAtExit g_lazy_instance;')
- c.Append()
c.Append('} // namespace')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('// static')
- c.Sblock('base::StringPiece %s::Get(const std::string& name) {' %
+ c.Sblock('bool %s::IsGenerated(base::StringPiece name) {' %
self._bundle._GenerateBundleClass('GeneratedSchemas'))
- c.Append('return IsGenerated(name) ? '
- 'g_lazy_instance.Get().schemas[name] : "";')
+ c.Append('return !Get(name).empty();')
c.Eblock('}')
c.Append()
c.Append('// static')
- c.Sblock('bool %s::IsGenerated(std::string name) {' %
+ c.Sblock('base::StringPiece %s::Get(base::StringPiece name) {' %
self._bundle._GenerateBundleClass('GeneratedSchemas'))
- c.Append('return g_lazy_instance.Get().schemas.count(name) > 0;')
+ c.Append('static const struct {')
+ c.Append(' base::StringPiece name;')
+ c.Append(' base::StringPiece schema;')
+ c.Sblock('} kSchemas[] = {')
+ namespaces = [self._bundle._model.namespaces[api.get('namespace')].name
+ for api in self._bundle._api_defs]
+ for namespace in sorted(namespaces):
+ schema_constant_name = _FormatNameAsConstant(namespace)
+ c.Append('{{"%s", %d}, {%s, sizeof(%s) - 1}},' %
+ (namespace, len(namespace),
+ schema_constant_name, schema_constant_name))
+ c.Eblock('};')
+ c.Sblock('for (const auto& schema : kSchemas) {')
+ c.Sblock('if (schema.name == name)')
+ c.Append('return schema.schema;')
+ c.Eblock()
+ c.Eblock('}')
+ c.Append('return base::StringPiece();')
c.Eblock('}')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
diff --git a/chromium/tools/json_schema_compiler/h_generator.py b/chromium/tools/json_schema_compiler/h_generator.py
index 7cf4d17ef7b..90e34d33fa3 100644
--- a/chromium/tools/json_schema_compiler/h_generator.py
+++ b/chromium/tools/json_schema_compiler/h_generator.py
@@ -214,7 +214,7 @@ class _Generator(object):
# static. On the other hand, those declared inline (e.g. in an object) do.
maybe_static = '' if is_toplevel else 'static '
(c.Append()
- .Append('%sstd::string ToString(%s as_enum);' %
+ .Append('%sconst char* ToString(%s as_enum);' %
(maybe_static, classname))
.Append('%s%s Parse%s(const std::string& as_string);' %
(maybe_static, classname, classname))
diff --git a/chromium/tools/json_schema_compiler/json_features.gni b/chromium/tools/json_schema_compiler/json_features.gni
index 7746b3b2277..c35c677fc73 100644
--- a/chromium/tools/json_schema_compiler/json_features.gni
+++ b/chromium/tools/json_schema_compiler/json_features.gni
@@ -53,13 +53,6 @@ template("json_features") {
"deps",
"public_deps",
])
-
- # Append a dependency on the extensions system. Headers in this target
- # are included by the feature compiler automatically.
- if (!defined(deps)) {
- deps = []
- }
- deps += [ "//extensions/common" ]
}
source_set(target_name) {
@@ -74,5 +67,12 @@ template("json_features") {
public_deps = []
}
public_deps += [ ":$action_name" ]
+
+ # Append a dependency on the extensions system. Headers in this target
+ # are included by the feature compiler automatically.
+ if (!defined(deps)) {
+ deps = []
+ }
+ deps += [ "//extensions/common" ]
}
}
diff --git a/chromium/tools/json_schema_compiler/util.cc b/chromium/tools/json_schema_compiler/util.cc
index 441fc253077..227ad3bb62f 100644
--- a/chromium/tools/json_schema_compiler/util.cc
+++ b/chromium/tools/json_schema_compiler/util.cc
@@ -20,7 +20,7 @@ bool ReportError(const base::Value& from,
error->append(base::ASCIIToUTF16("; "));
error->append(base::ASCIIToUTF16(base::StringPrintf(
"expected %s, got %s", base::Value::GetTypeName(expected),
- base::Value::GetTypeName(from.GetType()))));
+ base::Value::GetTypeName(from.type()))));
return false; // Always false on purpose.
}
diff --git a/chromium/tools/licenses.py b/chromium/tools/licenses.py
index 9a202c7e181..10217637054 100755
--- a/chromium/tools/licenses.py
+++ b/chromium/tools/licenses.py
@@ -107,7 +107,6 @@ ADDITIONAL_PATHS = (
os.path.join('chrome', 'test', 'chromeos', 'autotest'),
os.path.join('chrome', 'test', 'data'),
os.path.join('native_client'),
- os.path.join('sdch', 'open-vcdiff'),
os.path.join('testing', 'gmock'),
os.path.join('testing', 'gtest'),
os.path.join('tools', 'gyp'),
@@ -129,12 +128,6 @@ SPECIAL_CASES = {
"URL": "http://code.google.com/p/nativeclient",
"License": "BSD",
},
- os.path.join('sdch', 'open-vcdiff'): {
- "Name": "open-vcdiff",
- "URL": "https://github.com/google/open-vcdiff",
- "License": "Apache 2.0, MIT, GPL v2 and custom licenses",
- "License Android Compatible": "yes",
- },
os.path.join('testing', 'gmock'): {
"Name": "gmock",
"URL": "http://code.google.com/p/googlemock",
diff --git a/chromium/tools/lldb/OWNERS b/chromium/tools/lldb/OWNERS
new file mode 100644
index 00000000000..dd32882d2ea
--- /dev/null
+++ b/chromium/tools/lldb/OWNERS
@@ -0,0 +1,4 @@
+jzw@chromium.org
+ichikawa@chromium.org
+
+# COMPONENT: Tools
diff --git a/chromium/tools/lldb/lldb_chrome.py b/chromium/tools/lldb/lldb_chrome.py
new file mode 100644
index 00000000000..2c93d3414e0
--- /dev/null
+++ b/chromium/tools/lldb/lldb_chrome.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+ LLDB Support for Chromium types in Xcode
+
+ Add the following to your ~/.lldbinit:
+ command script import {Path to SRC Root}/tools/lldb/lldb_chrome.py
+"""
+
+import lldb
+
+def __lldb_init_module(debugger, internal_dict):
+ debugger.HandleCommand('type summary add -F ' +
+ 'lldb_chrome.basestring16_SummaryProvider base::string16')
+
+# This is highly dependent on libc++ being compiled with little endian.
+def basestring16_SummaryProvider(valobj, internal_dict):
+ s = valobj.GetValueForExpressionPath('.__r_.__first_.__s')
+ l = valobj.GetValueForExpressionPath('.__r_.__first_.__l')
+ size = s.GetChildMemberWithName('__size_').GetValueAsUnsigned(0)
+ is_short_string = size & 1 == 0
+ if is_short_string:
+ length = size >> 1
+ data = s.GetChildMemberWithName('__data_').GetPointeeData(0, length)
+ else:
+ length = l.GetChildMemberWithName('__size_').GetValueAsUnsigned(0)
+ data = l.GetChildMemberWithName('__data_').GetPointeeData(0, length)
+ error = lldb.SBError()
+ bytes_to_read = 2 * length
+ byte_string = data.ReadRawData(error, 0, bytes_to_read)
+ if error.fail:
+ return 'Summary error: %s' % error.description
+ else:
+ return '"' + byte_string.decode('utf-16').encode('utf-8') + '"'
diff --git a/chromium/tools/luci-go/linux64/isolate.sha1 b/chromium/tools/luci-go/linux64/isolate.sha1
index f9231473480..52ed59a4194 100644
--- a/chromium/tools/luci-go/linux64/isolate.sha1
+++ b/chromium/tools/luci-go/linux64/isolate.sha1
@@ -1 +1 @@
-48ffe036be8eff7d39ebbdbb705bd26f0ec6f404
+ccdea1387f9f26c2572bf5f2b8d24e8d66a69cf6
diff --git a/chromium/tools/luci-go/mac64/isolate.sha1 b/chromium/tools/luci-go/mac64/isolate.sha1
index 328cc063db1..85e622d4205 100644
--- a/chromium/tools/luci-go/mac64/isolate.sha1
+++ b/chromium/tools/luci-go/mac64/isolate.sha1
@@ -1 +1 @@
-f0d9ea71e7059a164962658b588286ebf262c5dd
+71329e82d274b847525f2b6345cb9fb6ddd0fff3
diff --git a/chromium/tools/luci-go/win64/isolate.exe.sha1 b/chromium/tools/luci-go/win64/isolate.exe.sha1
index 8038c79433d..76cc09d7430 100644
--- a/chromium/tools/luci-go/win64/isolate.exe.sha1
+++ b/chromium/tools/luci-go/win64/isolate.exe.sha1
@@ -1 +1 @@
-40790017e9b7856009c36768bf9244a4182ad5d1
+ded0dc9247e06fa42b653cce43f461f243ebea8b
diff --git a/chromium/tools/mb/mb.py b/chromium/tools/mb/mb.py
index 90c42bfdcdb..5a3ad63c32a 100755
--- a/chromium/tools/mb/mb.py
+++ b/chromium/tools/mb/mb.py
@@ -1083,7 +1083,8 @@ class MetaBuildWrapper(object):
def GetIsolateCommand(self, target, vals):
isolate_map = self.ReadIsolateMap()
- android = 'target_os="android"' in vals['gn_args']
+ is_android = 'target_os="android"' in vals['gn_args']
+ is_fuchsia = 'target_os="fuchsia"' in vals['gn_args']
# This should be true if tests with type='windowed_test_launcher' are
# expected to run using xvfb. For example, Linux Desktop, X11 CrOS and
@@ -1092,7 +1093,7 @@ class MetaBuildWrapper(object):
# and both can run under Xvfb.
# TODO(tonikitoo,msisov,fwang): Find a way to run tests for the Wayland
# backend.
- use_xvfb = self.platform == 'linux2' and not android
+ use_xvfb = self.platform == 'linux2' and not is_android and not is_fuchsia
asan = 'is_asan=true' in vals['gn_args']
msan = 'is_msan=true' in vals['gn_args']
@@ -1111,14 +1112,14 @@ class MetaBuildWrapper(object):
self.WriteFailureAndRaise('We should not be isolating %s.' % target,
output_path=None)
- if android and test_type != "script":
+ if is_android and test_type != "script":
cmdline = [
'../../build/android/test_wrapper/logdog_wrapper.py',
'--target', target,
- '--target-devices-file', '${SWARMING_BOT_FILE}',
'--logdog-bin-cmd', '../../bin/logdog_butler',
- '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
'--store-tombstones']
+ elif is_fuchsia and test_type != 'script':
+ cmdline = [os.path.join('bin', 'run_%s' % target)]
elif use_xvfb and test_type == 'windowed_test_launcher':
extra_files = [
'../../testing/test_env.py',
diff --git a/chromium/tools/mb/mb_config.pyl b/chromium/tools/mb/mb_config.pyl
index 2c09040214d..1e460c6c57e 100644
--- a/chromium/tools/mb/mb_config.pyl
+++ b/chromium/tools/mb/mb_config.pyl
@@ -96,7 +96,58 @@
'Linux ChromiumOS Builder': 'chromeos_with_codecs_release_bot',
'Linux ChromiumOS Builder (dbg)': 'chromeos_with_codecs_debug_bot',
'Linux ChromiumOS Full': 'chromeos_with_codecs_release_bot',
- 'Linux ChromiumOS Ozone Builder': 'chromeos_with_codecs_release_bot',
+ },
+
+ 'chromium.clang': {
+ 'CFI Linux CF': 'cfi_full_cfi_diag_recover_release_static',
+ 'CFI Linux ToT': 'clang_tot_cfi_full_cfi_diag_thin_lto_release_static_dcheck_always_on',
+ 'CFI Linux (icall)': 'cfi_full_diag_icall_release_static_dcheck_always_on',
+ 'CrWinAsan': 'asan_clang_fuzzer_static_v8_heap_x86_full_symbols_release',
+ 'CrWinAsan(dll)': 'asan_clang_shared_v8_heap_x86_full_symbols_release',
+ 'CrWinAsanCov': 'asan_clang_edge_fuzzer_static_v8_heap_x86_full_symbols_release',
+
+ # If CrWinClang is modified, please update CrWinClangGoma on chromium.fyi
+ # in the same way.
+ 'CrWinClang': 'clang_official_release_bot_minimal_symbols_x86',
+
+ 'CrWinClang(dbg)': 'clang_debug_bot_minimal_symbols_x86',
+ 'CrWinClang(shared)': 'clang_minimal_symbols_shared_release_bot_x86_dcheck',
+ 'CrWinClang64': 'clang_official_release_bot_minimal_symbols',
+ 'CrWinClang64(dbg)': 'win_clang_debug_bot',
+ 'CrWinClang64(dll)': 'clang_shared_release_bot_dcheck',
+ 'CrWinClangLLD': 'clang_tot_official_static_use_lld_x86',
+ 'CrWinClangLLD64': 'clang_tot_shared_release_use_lld_dcheck',
+ 'CrWinClngLLD64dbg': 'clang_tot_full_symbols_shared_debug_use_lld',
+ 'CrWinClngLLDdbg': 'clang_tot_full_symbols_shared_debug_use_lld_x86',
+ 'ToTAndroid': 'android_clang_tot_release',
+ 'ToTAndroid64': 'android_clang_tot_release_arm64',
+ 'ToTAndroidASan': 'android_clang_tot_asan',
+ 'ToTAndroid (dbg)': 'android_clang_tot_dbg',
+ 'ToTAndroid': 'android_clang_tot_release',
+ 'ToTAndroid64': 'android_clang_tot_release_arm64',
+ 'ToTAndroid x64': 'android_clang_tot_x64',
+ 'ToTLinux': 'clang_tot_linux_full_symbols_shared_release',
+ 'ToTLinux (dbg)': 'clang_tot_shared_debug',
+ 'ToTLinuxASan': 'clang_tot_asan_lsan_static_release',
+ 'ToTLinuxASanLibfuzzer': 'release_libfuzzer_asan_clang_tot',
+ 'ToTLinuxMSan': 'clang_tot_msan_release',
+ 'ToTLinuxLLD': 'clang_tot_lld_release_shared',
+ 'ToTLinuxThinLTO': 'clang_tot_release_full_symbols_thin_lto_static_use_lld',
+ 'ToTLinuxUBSanVptr': 'clang_tot_edge_ubsan_no_recover_hack_static_release',
+ 'ToTMac': 'clang_tot_minimal_symbols_shared_release',
+ 'ToTMac (dbg)': 'clang_tot_shared_debug',
+ 'ToTMacASan': 'asan_disable_nacl_clang_tot_full_symbols_static_release',
+ 'ToTWin': 'clang_tot_official_minimal_symbols_static_release_x86',
+ 'ToTWin(dbg)': 'clang_tot_shared_debug_x86',
+ 'ToTWin(dll)': 'clang_tot_minimal_symbols_shared_release_x86_dcheck',
+ 'ToTWin64': 'clang_tot_official_minimal_symbols_static_release',
+ 'ToTWin64(dbg)': 'clang_tot_shared_debug',
+ 'ToTWin64(dll)': 'clang_tot_shared_release_dcheck',
+ 'ToTWinCFI': 'clang_tot_cfi_full_cfi_diag_thin_lto_release_static_dcheck_always_on_x86',
+ 'ToTWinCFI64': 'clang_tot_cfi_full_cfi_diag_thin_lto_release_static_dcheck_always_on',
+ 'ToTWinThinLTO64': 'clang_tot_official_full_symbols_thin_lto_static_use_lld',
+ 'ToTiOS': 'ios',
+ 'UBSanVptr Linux': 'ubsan_vptr_release_bot',
},
'chromium.fyi': {
@@ -108,7 +159,7 @@
'Browser Side Navigation Linux': 'release_bot',
'CFI Linux CF': 'cfi_full_cfi_diag_recover_release_static',
'CFI Linux ToT': 'clang_tot_cfi_full_cfi_diag_thin_lto_release_static_dcheck_always_on',
- 'CFI Linux (icall)': 'cfi_full_diag_recover_icall_static',
+ 'CFI Linux (icall)': 'cfi_full_diag_icall_release_static_dcheck_always_on',
'ChromeOS amd64 Chromium Goma Canary': 'cros_chrome_sdk',
'Chromium Linux Goma Canary': 'release_bot',
'Chromium Linux Goma Canary': 'release_bot',
@@ -139,54 +190,20 @@
'CrWin7Goma(clbr)': 'shared_release_bot_x86',
'CrWin7Goma(dbg)': 'debug_bot_x86_minimal_symbols',
'CrWin7Goma(dll)': 'shared_release_bot_x86',
-
- # if CrWinClang is modified, please update CrWinClangGoma in the same way.
- 'CrWinClang': 'clang_official_release_bot_minimal_symbols_x86',
-
- 'CrWinClang(dbg)': 'clang_debug_bot_minimal_symbols_x86',
- 'CrWinClang64': 'clang_official_release_bot_minimal_symbols',
- 'CrWinClang64(dll)': 'clang_shared_release_bot_dcheck',
- 'CrWinClangGoma': 'clang_official_optimize_release_bot_minimal_symbols_x86',
'CrWinGoma': 'release_bot_x86_minimal_symbols',
'CrWinGoma(dll)': 'shared_release_bot_x86',
'CrWinGoma(loc)': 'shared_release_bot_x86',
- 'ClangToTAndroidASan': 'android_clang_tot_asan',
- 'ClangToTAndroid (dbg)': 'android_clang_tot_dbg',
- 'ClangToTAndroid': 'android_clang_tot_release',
- 'ClangToTAndroid64': 'android_clang_tot_release_arm64',
- 'ClangToTAndroid x64': 'android_clang_tot_x64',
- 'ClangToTLinux': 'clang_tot_linux_full_symbols_shared_release',
- 'ClangToTLinux (dbg)': 'clang_tot_shared_debug',
- 'ClangToTLinuxASan': 'clang_tot_asan_lsan_static_release',
- 'ClangToTLinuxASanLibfuzzer': 'release_libfuzzer_asan_clang_tot',
- 'ClangToTLinuxLLD': 'clang_tot_lld_release_shared',
- 'ClangToTLinuxUBSanVptr': 'clang_tot_edge_ubsan_no_recover_hack_static_release',
- 'ClangToTMac': 'clang_tot_minimal_symbols_shared_release',
- 'ClangToTMac (dbg)': 'clang_tot_shared_debug',
- 'ClangToTMacASan': 'asan_disable_nacl_clang_tot_full_symbols_static_release',
- 'ClangToTWin': 'clang_tot_official_minimal_symbols_static_release_x86',
- 'ClangToTWin(dbg)': 'clang_tot_shared_debug_x86',
- 'ClangToTWin(dll)': 'clang_tot_minimal_symbols_shared_release_x86_dcheck',
- 'ClangToTWin64': 'clang_tot_official_minimal_symbols_static_release',
- 'ClangToTWin64(dbg)': 'clang_tot_shared_debug',
- 'ClangToTWin64(dll)': 'clang_tot_shared_release_dcheck',
- 'ClangToTiOS': 'ios',
- 'Closure Compilation Linux': 'closure_compilation',
- 'CrWinAsan': 'asan_clang_fuzzer_static_v8_heap_x86_full_symbols_release',
- 'CrWinAsan(dll)': 'asan_clang_shared_v8_heap_x86_full_symbols_release',
- 'CrWinAsanCov': 'asan_clang_edge_fuzzer_static_v8_heap_x86_full_symbols_release',
- 'CrWinClang(shared)': 'clang_minimal_symbols_shared_release_bot_x86_dcheck',
- 'CrWinClang64(dbg)': 'win_clang_debug_bot',
- 'CrWinClangLLD': 'clang_tot_official_static_use_lld_x86',
- 'CrWinClangLLD64': 'clang_tot_shared_release_use_lld_dcheck',
- 'CrWinClngLLD64dbg': 'clang_tot_full_symbols_shared_debug_use_lld',
- 'CrWinClngLLDdbg': 'clang_tot_full_symbols_shared_debug_use_lld_x86',
+ 'CrWinClangGoma': 'clang_official_optimize_release_bot_minimal_symbols_x86',
+
'EarlGreyiOS': 'ios',
'Fuchsia': 'release_bot_fuchsia',
'Fuchsia (dbg)': 'debug_bot_fuchsia',
'GomaCanaryiOS': 'ios',
'ios-simulator': 'ios',
'Headless Linux (dbg)': 'headless_linux_debug_bot',
+ 'Jumbo Linux x64': 'jumbo_release_bot',
+ 'Jumbo Mac': 'jumbo_release_bot',
+ 'Jumbo Win x64': 'jumbo_release_bot',
'MD Top Chrome ChromeOS material-hybrid': 'chromeos_with_codecs_debug_bot',
'MD Top Chrome ChromeOS non-material': 'chromeos_with_codecs_debug_bot',
'MD Top Chrome Win material': 'debug_bot_minimal_symbols',
@@ -218,8 +235,6 @@
'Site Isolation Android': 'android_release_bot_minimal_symbols_arm64',
'Site Isolation Linux': 'release_trybot',
'Site Isolation Win': 'release_trybot_x86',
- 'ThinLTO Linux ToT': 'thin_lto_clang_tot_full_symbols_release_static_use_lld',
- 'UBSanVptr Linux': 'ubsan_vptr_release_bot',
'WebKit Linux - RandomOrder': 'release_trybot',
'WebKit Mac - RandomOrder': 'release_trybot',
'WebKit Win - RandomOrder': 'release_bot_x86_minimal_symbols',
@@ -235,6 +250,8 @@
'CrWinGomaStaging': 'release_bot_x86_minimal_symbols',
'Chromium Linux Goma GCE Staging': 'release_bot',
'Chromium Mac Goma GCE Staging': 'release_bot',
+ 'CrWinGomaGCEStaging': 'release_bot_x86_minimal_symbols',
+ 'CrWinClangGomaGCEStaging': 'win_clang_release_bot',
},
'chromium.gpu': {
@@ -257,6 +274,7 @@
'Android Release (Nexus 9)': 'android_release_trybot_arm64',
'Android Release (NVIDIA Shield TV)': 'android_release_trybot_arm64',
'GPU Linux Builder': 'gpu_fyi_tests_release_trybot',
+ 'GPU Linux Ozone Builder': 'gpu_fyi_tests_ozone_linux_system_gbm_libdrm_release_trybot',
'GPU Linux Builder (dbg)': 'gpu_fyi_tests_debug_trybot',
'GPU Linux dEQP Builder': 'deqp_release_trybot',
'GPU Mac Builder': 'gpu_fyi_tests_release_trybot',
@@ -269,9 +287,6 @@
'GPU Win x64 Builder': 'gpu_fyi_tests_release_trybot',
'GPU Win x64 Builder (dbg)': 'gpu_fyi_tests_debug_trybot',
'GPU Win x64 dEQP Builder': 'deqp_release_no_clang_trybot',
- 'Linux ChromiumOS Builder': 'gpu_fyi_tests_chromeos_cros_release_trybot',
- # This is, confusingly, apparently not actually building ChromiumOS.
- 'Linux ChromiumOS Ozone Builder': 'gpu_fyi_tests_ozone_linux_system_gbm_libdrm_release_trybot',
'Linux GPU TSAN Release': 'gpu_fyi_tests_release_trybot_tsan',
'Mac GPU ASAN Release': 'gpu_fyi_tests_release_trybot_asan',
},
@@ -362,6 +377,7 @@
},
'chromium.perf.fyi': {
+ 'Linux Compile FYI': 'official_goma_perf',
'Android Builder FYI': 'official_goma_minimal_symbols_android',
'Android arm64 Builder FYI': 'official_goma_minimal_symbols_android_arm64',
'Win Builder FYI': 'official_goma',
@@ -371,12 +387,18 @@
'Battor Agent Win': 'official_goma_minimal_symbols_clang',
},
+ 'chromium.sandbox': {
+ 'Linux Builder SANDBOX': 'release_bot',
+ 'Deterministic Linux SANDBOX': 'release_bot',
+ 'Fuchsia Compile SANDBOX': 'release_bot_fuchsia',
+ },
+
'chromium.swarm': {
'Android N5 Swarm': 'android_release_bot_minimal_symbols',
'Android N5X Swarm': 'android_release_bot_minimal_symbols_arm64',
'Linux Swarm': 'release_bot',
'Mac Swarm': 'release_bot_mac_strip',
- 'Windows Swarm': 'release_bot_x86_minimal_symbols',
+ 'Windows Swarm': 'release_bot_minimal_symbols',
},
'client.nacl.sdk': {
@@ -425,7 +447,6 @@
'WebKit Mac Builder': 'release_bot',
'WebKit Mac10.11 (retina)': 'release_bot',
'WebKit Mac10.12': 'release_bot',
- 'WebKit Mac10.9': 'release_bot',
'WebKit Win Builder (dbg)': 'debug_bot_x86_minimal_symbols',
'WebKit Win Builder': 'release_bot_x86_minimal_symbols',
'WebKit Win x64 Builder (dbg)': 'debug_bot_minimal_symbols',
@@ -483,10 +504,6 @@
'linux_trusty_blink_compile_rel': 'release_bot_minimal_symbols',
'linux_trusty_blink_dbg': 'debug_trybot',
'linux_trusty_blink_rel': 'release_bot_minimal_symbols',
- 'mac10.9_blink_compile_dbg': 'debug_trybot',
- 'mac10.9_blink_compile_rel': 'release_bot_minimal_symbols',
- 'mac10.9_blink_dbg': 'debug_trybot',
- 'mac10.9_blink_rel': 'release_bot_minimal_symbols',
'mac10.10_blink_rel': 'release_bot_minimal_symbols',
'mac10.11_blink_rel': 'release_bot_minimal_symbols',
'mac10.11_retina_blink_rel': 'release_bot_minimal_symbols',
@@ -528,7 +545,7 @@
'tryserver.chromium.angle': {
'android_angle_rel_ng': 'gpu_tests_android_release_trybot_arm64',
'android_angle_deqp_rel_ng': 'deqp_android_release_trybot_arm64',
- 'linux_angle_chromeos_rel_ng': 'gpu_fyi_tests_chromeos_cros_release_trybot',
+ 'linux_angle_ozone_rel_ng': 'gpu_fyi_tests_ozone_linux_system_gbm_libdrm_release_trybot',
'linux_angle_dbg_ng': 'gpu_fyi_tests_debug_trybot',
'linux_angle_deqp_rel_ng': 'deqp_release_trybot',
'linux_angle_rel_ng': 'gpu_fyi_tests_release_trybot',
@@ -543,8 +560,6 @@
},
'tryserver.chromium.linux': {
- 'Chromium Linux Codesearch Builder': 'codesearch',
- 'ChromiumOS Codesearch Builder': 'codesearch',
'cast_shell_linux': 'cast_release_trybot',
'cast_shell_audio_linux': 'cast_audio_release_trybot',
'chromeos_amd64-generic_chromium_compile_only_ng': 'cros_chrome_sdk',
@@ -568,7 +583,6 @@
'linux_chromium_chromeos_compile_rel_ng': 'chromeos_with_codecs_release_trybot',
'linux_chromium_chromeos_dbg_ng': 'chromeos_with_codecs_debug_trybot',
'linux_chromium_chromeos_msan_rel_ng': 'chromeos_msan_release_bot',
- 'linux_chromium_chromeos_ozone_rel_ng': 'chromeos_with_codecs_release_trybot',
'linux_chromium_chromeos_rel_ng': 'chromeos_with_codecs_release_trybot',
'linux_chromium_clobber_deterministic': 'release_trybot',
'linux_chromium_clobber_rel_ng': 'release_trybot',
@@ -599,7 +613,7 @@
'linux_layout_tests_layout_ng': 'release_bot',
'linux_mojo': 'release_trybot',
- 'linux_mojo_chromeos': 'release_trybot',
+ 'linux_mojo_chromeos': 'chromeos_with_codecs_release_trybot',
'linux_nacl_sdk_build': 'release_bot',
'linux_nacl_sdk': 'release_bot',
'linux_optional_gpu_tests_rel': 'gpu_fyi_tests_release_trybot',
@@ -614,6 +628,7 @@
'ios-simulator-eg': 'ios',
'ios-simulator-cronet': 'ios',
'ios-simulator-xcode-clang': 'ios',
+ 'mac_chromium_10.10': 'gpu_tests_release_trybot',
'mac_chromium_10.10_macviews': 'mac_views_browser_release_trybot',
'mac_chromium_10.12_rel_ng': 'gpu_tests_release_trybot',
'mac_chromium_archive_rel_ng': 'release_bot_mac_strip',
@@ -630,6 +645,8 @@
},
'tryserver.chromium.perf': {
+ 'Android Compile': 'official_goma_minimal_symbols_android',
+ 'Android arm64 Compile': 'official_goma_minimal_symbols_android_arm64',
'android_arm64_perf_bisect_builder': 'official_goma_minimal_symbols_android_arm64',
'android_fyi_perf_bisect': 'official_goma_minimal_symbols_android',
'android_nexus5X_perf_bisect': 'official_goma_minimal_symbols_android',
@@ -641,6 +658,7 @@
'android_perf_bisect_builder': 'official_goma_minimal_symbols_android',
'android_s5_perf_bisect': 'official_goma_minimal_symbols_android',
'android_webview_arm64_aosp_perf_bisect': 'official_goma_minimal_symbols_android',
+ 'Linux Builder': 'official_goma_perf',
'linux_fyi_perf_bisect': 'official_goma',
'linux_perf_bisect': 'official_goma',
'linux_perf_bisect_builder': 'official_goma',
@@ -653,6 +671,8 @@
'mac_perf_bisect_builder': 'official_goma',
'mac_retina_perf_bisect': 'official_goma',
'mac_retina_perf_cq': 'official_goma',
+ 'Win Builder': 'official_goma_x86',
+ 'Win x64 Builder': 'official_goma',
'win_8_perf_bisect': 'official_goma_x86',
'win_fyi_perf_bisect': 'official_goma_x86',
'win_perf_bisect': 'official_goma_x86',
@@ -1041,8 +1061,8 @@
'cfi_full', 'cfi_diag', 'thin_lto', 'release', 'static', 'dcheck_always_on', 'goma',
],
- 'cfi_full_diag_recover_icall_static': [
- 'cfi_full', 'cfi_diag', 'cfi_recover', 'cfi_icall', 'thin_lto', 'static',
+ 'cfi_full_diag_icall_release_static_dcheck_always_on': [
+ 'cfi_full', 'cfi_diag', 'cfi_icall', 'thin_lto', 'release', 'static', 'dcheck_always_on',
],
'chromeos_asan_lsan_edge_fuzzer_v8_heap_release_bot': [
@@ -1117,6 +1137,10 @@
'clang_tot', 'cfi_full', 'cfi_diag', 'thin_lto', 'release', 'static', 'dcheck_always_on',
],
+ 'clang_tot_cfi_full_cfi_diag_thin_lto_release_static_dcheck_always_on_x86': [
+ 'clang_tot', 'cfi_full', 'cfi_diag', 'thin_lto', 'release', 'static', 'dcheck_always_on', 'x86',
+ ],
+
'clang_tot_edge_ubsan_no_recover_hack_static_release': [
'clang_tot', 'edge', 'ubsan_no_recover_hack', 'static', 'release',
],
@@ -1127,6 +1151,10 @@
'clang_tot', 'full_symbols', 'shared', 'release',
],
+ 'clang_tot_msan_release': [
+ 'clang_tot', 'msan', 'release',
+ ],
+
'clang_tot_shared_debug': [
'clang_tot', 'shared', 'debug',
],
@@ -1165,6 +1193,10 @@
'clang_tot', 'minimal_symbols', 'shared', 'release', 'x86', 'dcheck_always_on',
],
+ 'clang_tot_official_full_symbols_thin_lto_static_use_lld': [
+ 'clang_tot', 'official', 'full_symbols', 'thin_lto', 'static', 'use_lld',
+ ],
+
'clang_tot_official_minimal_symbols_static_release': [
'clang_tot', 'official', 'minimal_symbols', 'static', 'release',
],
@@ -1173,6 +1205,10 @@
'clang_tot', 'official', 'minimal_symbols', 'static', 'release', 'x86',
],
+ 'clang_tot_release_full_symbols_thin_lto_static_use_lld': [
+ 'clang_tot', 'release', 'full_symbols', 'thin_lto', 'static', 'use_lld',
+ ],
+
'clang_tot_lld_release_shared': [
# TODO(crbug.com/605819): Enable debug info in release builds.
'clang_tot', 'release', 'shared', 'use_lld',
@@ -1187,11 +1223,6 @@
'error',
],
- 'codesearch': [
- # The Codesearch bots run their own recipe and invoke GN directly.
- 'error',
- ],
-
'codesearch_gen_chromium_android_bot': [
'goma', 'clang', 'shared', 'debug', 'minimal_symbols', 'arm', 'android_without_codecs',
],
@@ -1283,10 +1314,6 @@
'angle_deqp_tests', 'release_trybot',
],
- 'gpu_fyi_tests_chromeos_cros_release_trybot': [
- 'gpu_fyi_tests', 'chromeos', 'release_trybot',
- ],
-
'gpu_fyi_tests_debug_trybot': [
'gpu_fyi_tests', 'debug_trybot',
],
@@ -1366,6 +1393,10 @@
# build files.
'ios': [ 'error'],
+ 'jumbo_release_bot': [
+ 'jumbo', 'release_bot'
+ ],
+
'linux_chromium_analysis': [
'analysis'
],
@@ -1452,7 +1483,7 @@
],
'release_afl_asan': [
- 'release', 'afl', 'asan', 'chromeos_codecs', 'pdf_xfa', 'disable_nacl',
+ 'release', 'afl', 'asan', 'chromeos_codecs', 'pdf_xfa', 'disable_nacl', 'optimize_for_fuzzing',
],
'release_bot': [
@@ -1543,10 +1574,10 @@
'syzyasan_no_pch_release_x86': [
'syzyasan', 'no_pch', 'release', 'x86',
- ],
-
- 'thin_lto_clang_tot_full_symbols_release_static_use_lld': [
- 'thin_lto', 'clang_tot', 'full_symbols', 'release', 'static', 'use_lld',
+ # The SyzyASAN build may default dcheck_always_on to true to produce
+ # official builds with DCHECK on. To counter this, the flag is fixed
+ # here. For details see https://crbug.com/596231#c71.
+ 'disable_dcheck_always_on',
],
'tsan_disable_nacl_debug_bot': [
@@ -1737,6 +1768,10 @@
'gn_args': 'dcheck_always_on=true',
},
+ 'disable_dcheck_always_on': {
+ 'gn_args': 'dcheck_always_on=false',
+ },
+
'debug': {
'gn_args': 'is_debug=true',
},
@@ -1843,16 +1878,16 @@
'gn_args': 'emma_coverage=true emma_filter="org.chromium.*"',
},
+ 'jumbo': {
+ 'gn_args': 'use_jumbo_build=true',
+ },
+
'libfuzzer': { 'gn_args': 'use_libfuzzer=true' },
'lsan': {
'gn_args': 'is_lsan=true',
},
- 'lto': {
- 'gn_args': 'allow_posix_link_time_opt=true',
- },
-
'mac_strip': {
'gn_args': 'enable_stripping=true',
},
@@ -1969,7 +2004,6 @@
'thin_lto': {
'gn_args': 'use_thin_lto=true',
- 'mixins': ['lto'],
},
'tsan': {
@@ -2023,6 +2057,6 @@
},
'luci_tryservers': {
- 'chromium.try': [ 'LUCI linux_chromium_rel_ng' ],
+ 'chromium.try': [ 'linux_chromium_rel_ng' ],
},
}
diff --git a/chromium/tools/metrics/BUILD.gn b/chromium/tools/metrics/BUILD.gn
index 9e131b7a72b..fbad35e3f70 100644
--- a/chromium/tools/metrics/BUILD.gn
+++ b/chromium/tools/metrics/BUILD.gn
@@ -43,3 +43,71 @@ group("metrics_metadata") {
":rappor_xml",
]
}
+
+# This group defines the isolate files needed to run metrics_python_tests.py on
+# on bots. This also tells the build system when the tests should be re-run -
+# when one of the dependent files changes.
+#
+# When adding new entries to this test suite, you can test things locally with
+# isolation using the following command:
+#
+# tools/mb/mb.py run out/gn metrics_python_tests -- \
+# --isolated-script-test-output=/tmp/output.json
+#
+group("metrics_python_tests") {
+ data = [
+ # The run_isolated_script_test.py script and its dependencies.
+ "//testing/scripts/run_isolated_script_test.py",
+ "//testing/scripts/common.py",
+ "//testing/xvfb.py",
+ "//testing/test_env.py",
+ "//third_party/typ/",
+
+ # Scripts we depend on. Their unit tests are also included.
+ "//tools/json_comment_eater/json_comment_eater.py",
+ "//tools/json_comment_eater/json_comment_eater_test.py",
+ "//tools/json_comment_eater/everything.json",
+ "//tools/json_comment_eater/everything_expected.json",
+ "//tools/json_to_struct/element_generator.py",
+ "//tools/json_to_struct/element_generator_test.py",
+ "//tools/json_to_struct/json_to_struct.py",
+ "//tools/json_to_struct/struct_generator.py",
+ "//tools/json_to_struct/struct_generator_test.py",
+
+ # The metrics_python_tests.py runner and its dependencies.
+ "//tools/metrics/metrics_python_tests.py",
+
+ "//tools/metrics/actions/action_utils.py",
+ "//tools/metrics/actions/extract_actions.py",
+ "//tools/metrics/actions/extract_actions_test.py",
+ "//tools/metrics/actions/pretty_print.py",
+ "//tools/metrics/actions/print_style.py",
+
+ "//tools/metrics/common/diff_util.py",
+ "//tools/metrics/common/models.py",
+ "//tools/metrics/common/path_util.py",
+ "//tools/metrics/common/presubmit_util.py",
+ "//tools/metrics/common/pretty_print_xml.py",
+
+ "//tools/metrics/histograms/extract_histograms.py",
+ "//tools/metrics/histograms/generate_expired_histograms_array.py",
+ "//tools/metrics/histograms/generate_expired_histograms_array_unittest.py",
+ "//tools/metrics/histograms/merge_xml.py",
+
+ "//tools/metrics/ukm/model.py",
+ "//tools/metrics/ukm/pretty_print_test.py",
+
+ "//tools/python/google/path_utils.py",
+
+ "//tools/variations/fieldtrial_to_struct_unittest.py",
+ "//tools/variations/fieldtrial_to_struct.py",
+ "//tools/variations/fieldtrial_util_unittest.py",
+ "//tools/variations/fieldtrial_util.py",
+ "//tools/variations/unittest_data/expected_output.cc",
+ "//tools/variations/unittest_data/expected_output.h",
+ "//tools/variations/unittest_data/test_config.json",
+ "//components/variations/field_trial_config/field_trial_testing_config_schema.json",
+ "//components/variations/service/generate_ui_string_overrider.py",
+ "//components/variations/service/generate_ui_string_overrider_unittest.py",
+ ]
+}
diff --git a/chromium/tools/msan/blacklist.txt b/chromium/tools/msan/blacklist.txt
index 40ea4b83a07..feada5c171f 100644
--- a/chromium/tools/msan/blacklist.txt
+++ b/chromium/tools/msan/blacklist.txt
@@ -20,5 +20,5 @@ fun:unpack_RGBA8888
fun:unpack_RGB888
# False positives due to use of linux_syscall_support. http://crbug.com/394028
-src:*/breakpad/src/*
+src:*/third_party/breakpad/breakpad/src/*
src:*/components/crash/content/app/breakpad_linux.cc
diff --git a/chromium/tools/perf/chrome_telemetry_build/BUILD.gn b/chromium/tools/perf/chrome_telemetry_build/BUILD.gn
index 54a039c59ab..850be947745 100644
--- a/chromium/tools/perf/chrome_telemetry_build/BUILD.gn
+++ b/chromium/tools/perf/chrome_telemetry_build/BUILD.gn
@@ -38,14 +38,14 @@ group("telemetry_chrome_test") {
}
if (is_linux) {
- data_deps += [ "//breakpad:dump_syms($host_toolchain)" ]
+ data_deps += [ "//third_party/breakpad:dump_syms($host_toolchain)" ]
}
if (is_mac) {
data_deps += [
- "//breakpad:dump_syms",
"//chrome:chrome_framework",
"//chrome:chrome_helper_app",
+ "//third_party/breakpad:dump_syms",
"//third_party/crashpad/crashpad/tools:crashpad_database_util",
]
}
diff --git a/chromium/tools/perf/contrib/vr_benchmarks/BUILD.gn b/chromium/tools/perf/contrib/vr_benchmarks/BUILD.gn
index 9b1c8889ad5..029bbe685eb 100644
--- a/chromium/tools/perf/contrib/vr_benchmarks/BUILD.gn
+++ b/chromium/tools/perf/contrib/vr_benchmarks/BUILD.gn
@@ -8,8 +8,11 @@ group("vr_perf_tests") {
testonly = true
data = [
"./__init__.py",
- "./vr_benchmarks/",
- "./vr_page_sets/",
+ "./shared_android_vr_page_state.py",
+ "./vr_benchmarks.py",
+ "./vr_browsing_mode_pages.py",
+ "./vr_sample_page.py",
+ "./webvr_sample_pages.py",
"//chrome/android/shared_preference_files/test/",
"//third_party/gvr-android-sdk/test-apks/vr_services/vr_services_current.apk",
"//chrome/test/data/vr/webvr_info/samples/",
diff --git a/chromium/tools/roll_angle.py b/chromium/tools/roll_angle.py
index aa48c39bf22..8928d084488 100755
--- a/chromium/tools/roll_angle.py
+++ b/chromium/tools/roll_angle.py
@@ -44,7 +44,7 @@ upload.verbosity = 0 # Errors only.
CHROMIUM_GIT_URL = 'https://chromium.googlesource.com/chromium/src.git'
CL_ISSUE_RE = re.compile('^Issue number: ([0-9]+) \((.*)\)$')
-RIETVELD_URL_RE = re.compile('^https?://(.*)/(.*)')
+REVIEW_URL_RE = re.compile('^https?://(.*)/(.*)')
ROLL_BRANCH_NAME = 'special_angle_roll_branch'
TRYJOB_STATUS_SLEEP_SECONDS = 30
@@ -54,7 +54,7 @@ ANGLE_PATH = os.path.join('third_party', 'angle')
CommitInfo = collections.namedtuple('CommitInfo', ['git_commit',
'git_repo_url'])
-CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'rietveld_server'])
+CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'review_server'])
def _VarLookup(local_scope):
return lambda var_name: local_scope['vars'][var_name]
@@ -190,13 +190,13 @@ class AutoRoller(object):
issue_number = int(m.group(1))
url = m.group(2)
- # Parse the Rietveld host from the URL.
- m = RIETVELD_URL_RE.match(url)
+ # Parse the codereview host from the URL.
+ m = REVIEW_URL_RE.match(url)
if not m:
- logging.error('Cannot parse Rietveld host from URL: %s', url)
+ logging.error('Cannot parse codereview host from URL: %s', url)
sys.exit(-1)
- rietveld_server = m.group(1)
- return CLInfo(issue_number, url, rietveld_server)
+ review_server = m.group(1)
+ return CLInfo(issue_number, url, review_server)
def _GetCurrentBranchName(self):
return self._RunCommand(
diff --git a/chromium/tools/roll_swiftshader.py b/chromium/tools/roll_swiftshader.py
index 78e28449311..447d0ac8e1d 100755
--- a/chromium/tools/roll_swiftshader.py
+++ b/chromium/tools/roll_swiftshader.py
@@ -44,7 +44,6 @@ sys.path.insert(0, os.path.join(SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
import roll_dep_svn
-from gclient import GClientKeywords
from third_party import upload
# Avoid depot_tools/third_party/upload.py print verbose messages.
@@ -52,7 +51,7 @@ upload.verbosity = 0 # Errors only.
CHROMIUM_GIT_URL = 'https://chromium.googlesource.com/chromium/src.git'
CL_ISSUE_RE = re.compile('^Issue number: ([0-9]+) \((.*)\)$')
-RIETVELD_URL_RE = re.compile('^https?://(.*)/(.*)')
+REVIEW_URL_RE = re.compile('^https?://(.*)/(.*)')
ROLL_BRANCH_NAME = 'special_swiftshader_roll_branch'
TRYJOB_STATUS_SLEEP_SECONDS = 30
@@ -62,7 +61,10 @@ SWIFTSHADER_PATH = os.path.join('third_party', 'swiftshader')
CommitInfo = collections.namedtuple('CommitInfo', ['git_commit',
'git_repo_url'])
-CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'rietveld_server'])
+CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'review_server'])
+
+def _VarLookup(local_scope):
+ return lambda var_name: local_scope['vars'][var_name]
def _PosixPath(path):
"""Convert a possibly-Windows path to a posix-style path."""
@@ -86,9 +88,8 @@ def _ParseDepsFile(filename):
def _ParseDepsDict(deps_content):
local_scope = {}
- var = GClientKeywords.VarImpl({}, local_scope)
global_scope = {
- 'Var': var.Lookup,
+ 'Var': _VarLookup(local_scope),
'deps_os': {},
}
exec(deps_content, global_scope, local_scope)
@@ -197,13 +198,13 @@ class AutoRoller(object):
issue_number = int(m.group(1))
url = m.group(2)
- # Parse the Rietveld host from the URL.
- m = RIETVELD_URL_RE.match(url)
+ # Parse the codereview host from the URL.
+ m = REVIEW_URL_RE.match(url)
if not m:
- logging.error('Cannot parse Rietveld host from URL: %s', url)
+ logging.error('Cannot parse codereview host from URL: %s', url)
sys.exit(-1)
- rietveld_server = m.group(1)
- return CLInfo(issue_number, url, rietveld_server)
+ review_server = m.group(1)
+ return CLInfo(issue_number, url, review_server)
def _GetCurrentBranchName(self):
return self._RunCommand(
diff --git a/chromium/tools/roll_webgl_conformance.py b/chromium/tools/roll_webgl_conformance.py
index 326f2c4d360..dad46670bc1 100755
--- a/chromium/tools/roll_webgl_conformance.py
+++ b/chromium/tools/roll_webgl_conformance.py
@@ -44,7 +44,7 @@ upload.verbosity = 0 # Errors only.
CHROMIUM_GIT_URL = 'https://chromium.googlesource.com/chromium/src.git'
CL_ISSUE_RE = re.compile('^Issue number: ([0-9]+) \((.*)\)$')
-RIETVELD_URL_RE = re.compile('^https?://(.*)/(.*)')
+REVIEW_URL_RE = re.compile('^https?://(.*)/(.*)')
ROLL_BRANCH_NAME = 'special_webgl_roll_branch'
TRYJOB_STATUS_SLEEP_SECONDS = 30
@@ -56,7 +56,7 @@ WEBGL_REVISION_TEXT_FILE = os.path.join(
CommitInfo = collections.namedtuple('CommitInfo', ['git_commit',
'git_repo_url'])
-CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'rietveld_server'])
+CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'review_server'])
def _VarLookup(local_scope):
@@ -189,13 +189,13 @@ class AutoRoller(object):
issue_number = int(m.group(1))
url = m.group(2)
- # Parse the Rietveld host from the URL.
- m = RIETVELD_URL_RE.match(url)
+ # Parse the codereview host from the URL.
+ m = REVIEW_URL_RE.match(url)
if not m:
- logging.error('Cannot parse Rietveld host from URL: %s', url)
+ logging.error('Cannot parse codereview host from URL: %s', url)
sys.exit(-1)
- rietveld_server = m.group(1)
- return CLInfo(issue_number, url, rietveld_server)
+ review_server = m.group(1)
+ return CLInfo(issue_number, url, review_server)
def _GetCurrentBranchName(self):
return self._RunCommand(
diff --git a/chromium/tools/roll_webrtc.py b/chromium/tools/roll_webrtc.py
index 37c1bc57556..23eb39f1215 100755
--- a/chromium/tools/roll_webrtc.py
+++ b/chromium/tools/roll_webrtc.py
@@ -18,7 +18,6 @@ SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
-import rietveld
import roll_dep_svn
from third_party import upload
@@ -26,9 +25,9 @@ from third_party import upload
upload.verbosity = 0 # Errors only.
CHROMIUM_GIT_URL = 'https://chromium.googlesource.com/chromium/src.git'
-COMMIT_POSITION_RE = re.compile('^Cr-Original-Commit-Position: .*#([0-9]+).*$')
+COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$')
CL_ISSUE_RE = re.compile('^Issue number: ([0-9]+) \((.*)\)$')
-RIETVELD_URL_RE = re.compile('^https?://(.*)/(.*)')
+REVIEW_URL_RE = re.compile('^https?://(.*)/(.*)')
ROLL_BRANCH_NAME = 'special_webrtc_roll_branch'
TRYJOB_STATUS_SLEEP_SECONDS = 30
@@ -58,7 +57,7 @@ FAILURE_STATUS = (2, 4, 5)
CommitInfo = collections.namedtuple('CommitInfo', ['commit_position',
'git_commit',
'git_repo_url'])
-CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'rietveld_server'])
+CLInfo = collections.namedtuple('CLInfo', ['issue', 'url', 'review_server'])
def _VarLookup(local_scope):
@@ -105,44 +104,6 @@ def _ParseDepsDict(deps_content):
return local_scope
-def _WaitForTrybots(issue, rietveld_server):
- """Wait until all trybots have passed or at least one have failed.
-
- Returns:
- An exit code of 0 if all trybots passed or non-zero otherwise.
- """
- assert type(issue) is int
- print 'Trybot status for https://%s/%d:' % (rietveld_server, issue)
- remote = rietveld.Rietveld('https://' + rietveld_server, None, None)
-
- attempt = 0
- max_tries = 60*60/TRYJOB_STATUS_SLEEP_SECONDS # Max one hour
- while attempt < max_tries:
- # Get patches for the issue so we can use the latest one.
- data = remote.get_issue_properties(issue, messages=False)
- patchsets = data['patchsets']
-
- # Get trybot status for the latest patch set.
- data = remote.get_patchset_properties(issue, patchsets[-1])
-
- tryjob_results = data['try_job_results']
- if len(tryjob_results) == 0:
- logging.debug('No trybots have yet been triggered for https://%s/%d' ,
- rietveld_server, issue)
- else:
- _PrintTrybotsStatus(tryjob_results)
- if any(r['result'] in FAILURE_STATUS for r in tryjob_results):
- logging.error('Found failing tryjobs (see above)')
- return 1
- if all(r['result'] in SUCCESS_STATUS for r in tryjob_results):
- return 0
-
- logging.debug('Waiting for %d seconds before next check...',
- TRYJOB_STATUS_SLEEP_SECONDS)
- time.sleep(TRYJOB_STATUS_SLEEP_SECONDS)
- attempt += 1
-
-
def _PrintTrybotsStatus(tryjob_results):
status_to_name = {}
for trybot_result in tryjob_results:
@@ -154,6 +115,7 @@ def _PrintTrybotsStatus(tryjob_results):
for status,name_list in status_to_name.iteritems():
print '%s: %s' % (status, ','.join(sorted(name_list)))
+
class AutoRoller(object):
def __init__(self, chromium_src):
self._chromium_src = chromium_src
@@ -200,17 +162,20 @@ class AutoRoller(object):
webrtc_header = 'Roll WebRTC %s:%s (%d commit%s)' % (
webrtc_current.commit_position, webrtc_new.commit_position,
nb_commits, 's' if nb_commits > 1 else '')
-
+ git_author = self._RunCommand(
+ ['git', 'config', 'user.email'],
+ working_dir=self._chromium_src).splitlines()[0]
description = ('%s\n\n'
'Changes: %s\n\n'
'$ %s\n'
'%s\n'
- 'TBR=\n'
+ 'TBR=%s\n'
'CQ_INCLUDE_TRYBOTS=%s\n') % (
webrtc_header,
webrtc_changelog_url,
' '.join(git_log_cmd),
git_log,
+ git_author,
EXTRA_TRYBOTS)
return description
@@ -242,13 +207,13 @@ class AutoRoller(object):
issue_number = int(m.group(1))
url = m.group(2)
- # Parse the Rietveld host from the URL.
- m = RIETVELD_URL_RE.match(url)
+ # Parse the codereview host from the URL.
+ m = REVIEW_URL_RE.match(url)
if not m:
- logging.error('Cannot parse Rietveld host from URL: %s', url)
+ logging.error('Cannot parse codereview host from URL: %s', url)
sys.exit(-1)
- rietveld_server = m.group(1)
- return CLInfo(issue_number, url, rietveld_server)
+ review_server = m.group(1)
+ return CLInfo(issue_number, url, review_server)
def _GetCurrentBranchName(self):
return self._RunCommand(
@@ -321,15 +286,13 @@ class AutoRoller(object):
self._RunCommand(['git', 'add', '--update', '.'])
self._RunCommand(['git', 'commit', '-m', description])
logging.debug('Uploading changes...')
- self._RunCommand(['git', 'cl', 'upload'],
- extra_env={'EDITOR': 'true'})
- cl_info = self._GetCLInfo()
- logging.debug('Issue: %d URL: %s', cl_info.issue, cl_info.url)
-
+ upload_cmd = ['git', 'cl', 'upload']
if not dry_run and not no_commit:
logging.debug('Sending the CL to the CQ...')
- self._RunCommand(['git', 'cl', 'set_commit'])
- logging.debug('Sent the CL to the CQ. Monitor here: %s', cl_info.url)
+ upload_cmd.extend(['--use-commit-queue', '--send-mail'])
+ self._RunCommand(upload_cmd, extra_env={'EDITOR': 'true'})
+ cl_info = self._GetCLInfo()
+ logging.debug('Issue: %d URL: %s', cl_info.issue, cl_info.url)
# TODO(kjellander): Checkout masters/previous branches again.
return 0
@@ -385,13 +348,6 @@ class AutoRoller(object):
self._RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
return 0
- def WaitForTrybots(self):
- active_branch, _ = self._GetBranches()
- if active_branch != ROLL_BRANCH_NAME:
- self._RunCommand(['git', 'checkout', ROLL_BRANCH_NAME])
- cl_info = self._GetCLInfo()
- return _WaitForTrybots(cl_info.issue, cl_info.rietveld_server)
-
def main():
parser = argparse.ArgumentParser(
@@ -404,12 +360,6 @@ def main():
help=('Don\'t send the CL to the CQ. This is useful if additional changes '
'are needed to the CL (like for API changes).'),
action='store_true')
- parser.add_argument('--wait-for-trybots',
- help=('Waits until all trybots from a previously created roll are either '
- 'successful or at least one has failed. This is useful to be able to '
- 'continuously run this script but not initiating new rolls until a '
- 'previous one is known to have passed or failed.'),
- action='store_true')
parser.add_argument('--close-previous-roll', action='store_true',
help='Abort a previous roll if one exists.')
parser.add_argument('--dry-run', action='store_true', default=False,
@@ -433,8 +383,6 @@ def main():
autoroller = AutoRoller(SRC_DIR)
if args.abort:
return autoroller.Abort()
- elif args.wait_for_trybots:
- return autoroller.WaitForTrybots()
else:
return autoroller.PrepareRoll(args.dry_run, args.ignore_checks,
args.no_commit, args.close_previous_roll,
diff --git a/chromium/tools/safely-roll-deps.py b/chromium/tools/safely-roll-deps.py
index 73941238796..15e3ab6c3e8 100755
--- a/chromium/tools/safely-roll-deps.py
+++ b/chromium/tools/safely-roll-deps.py
@@ -4,7 +4,7 @@
# found in the LICENSE file.
"""Generate a CL to roll a DEPS entry to the specified revision number and post
-it to Rietveld so that the CL will land automatically if it passes the
+it for review so that the CL will land automatically if it passes the
commit-queue's checks.
"""
@@ -122,7 +122,6 @@ def main():
if not options.dry_run:
prnt_subprocess.check_call(['git', 'fetch', 'origin'])
- prnt_subprocess.call(['git', 'svn', 'fetch'])
branch_cmd = ['git', 'checkout', '-b', new_branch, options.upstream]
if options.force:
branch_cmd.append('-f')
diff --git a/chromium/tools/traffic_annotation/README.md b/chromium/tools/traffic_annotation/README.md
new file mode 100644
index 00000000000..6a4b59633ee
--- /dev/null
+++ b/chromium/tools/traffic_annotation/README.md
@@ -0,0 +1,66 @@
+# Running the traffic annotation checkers
+
+The traffic annotation checkers ensure that every operation in the
+code base that talks to the network is properly annotated in the
+source code, so that we can produce reports of what Chromium talks to
+over the network and why.
+
+To run the checkers, you need a populated build directory, and then
+you do:
+
+```
+$ python tools/annotation_checker/presubmit_checks.py --build-path out/Default
+```
+
+## Building the annotation checker.
+
+The annotation checkers are built as Clang tools. We do not want every
+developer to have to build clang, and so we store pre-built binaries
+in a Google Cloud Storage bucket and retrieve them via gclient hooks.
+
+To roll new versions of the binaries, assuming you have write access
+to the chromium-tools-traffic_annotation bucket, run:
+
+```bash
+git new-branch roll_traffic_annotation_tools
+python tools/clang/scripts/update.py --bootstrap --force-local-build \
+ --without-android --extra-tools traffic_annotation_extractor
+cp third_party/llvm-build/Release+Asserts/bin/traffic_annotation_extractor \
+ tools/traffic_annotation/bin/linux64/
+
+# These GN flags produce an optimized, stripped binary that has no dependency
+# on glib.
+gn gen --args='is_official_build=true use_ozone=true' out/Default
+
+ninja -C out/Default traffic_annotation_auditor
+cp -p out/Default/traffic_annotation_auditor \
+ tools/traffic_annotation/bin/linux64
+
+strip tools/traffic_annotation/bin/linux64/traffic_annotation_{auditor,extractor}
+
+third_party/depot_tools/upload_to_google_storage.py \
+ -b chromium-tools-traffic_annotation \
+ tools/traffic_annotation/bin/linux64/traffic_annotation_{auditor,extractor}
+sed -i '/^CLANG_REVISION=/d' tools/traffic_annotation/README.md
+sed -i '/^LASTCHANGE=/d' tools/traffic_annotation/README.md
+grep '^CLANG_REVISION=' tools/clang/scripts/update.py >> tools/traffic_annotation/README.md
+cat build/util/LASTCHANGE >> tools/traffic_annotation/README.md
+git commit -a -m 'Roll traffic_annotation checkers'
+git cl upload
+
+```
+
+and land the resulting CL.
+
+The following two lines will be updated by the above script, and the modified
+README should be committed along with the updated .sha1 checksums.
+
+In the event that clang changes something that requires this tool to be
+rebuilt (or for some other reason the tests don't work correctly), please
+disable this test by setting the `test_is_enabled` flag to False in
+//tools/traffic_annotation/scripts_check_annotation.py, and file a bug
+and cc the people listed in OWNERS; they'll be on the hook to rebuild and
+re-enable the test.
+
+CLANG_REVISION = '308728'
+LASTCHANGE=e684c0cebda9d00d4b5d655920c516f79d857103-refs/heads/master@{#507132}
diff --git a/chromium/tools/traffic_annotation/auditor/BUILD.gn b/chromium/tools/traffic_annotation/auditor/BUILD.gn
index 4e593123a8a..9c10ed169e8 100644
--- a/chromium/tools/traffic_annotation/auditor/BUILD.gn
+++ b/chromium/tools/traffic_annotation/auditor/BUILD.gn
@@ -57,6 +57,9 @@ source_set("auditor_sources") {
"traffic_annotation_file_filter.cc",
"traffic_annotation_file_filter.h",
]
+ data = [
+ "safe_list.txt",
+ ]
public_deps = [
":traffic_annotation",
@@ -71,7 +74,6 @@ executable("traffic_annotation_auditor") {
sources = [
"traffic_annotation_auditor_ui.cc",
]
-
deps = [
":auditor_sources",
]
@@ -81,6 +83,35 @@ test("traffic_annotation_auditor_unittests") {
sources = [
"traffic_annotation_auditor_unittest.cc",
]
+ data = [
+ "tests/extractor_outputs/bad_assignment1.txt",
+ "tests/extractor_outputs/bad_assignment2.txt",
+ "tests/extractor_outputs/bad_call.txt",
+ "tests/extractor_outputs/bad_syntax_annotation1.txt",
+ "tests/extractor_outputs/bad_syntax_annotation2.txt",
+ "tests/extractor_outputs/bad_syntax_annotation3.txt",
+ "tests/extractor_outputs/bad_syntax_annotation4.txt",
+ "tests/extractor_outputs/fatal_annotation1.txt",
+ "tests/extractor_outputs/fatal_annotation2.txt",
+ "tests/extractor_outputs/fatal_annotation3.txt",
+ "tests/extractor_outputs/good_assignment.txt",
+ "tests/extractor_outputs/good_branched_completing_annotation.txt",
+ "tests/extractor_outputs/good_call.txt",
+ "tests/extractor_outputs/good_complete_annotation.txt",
+ "tests/extractor_outputs/good_completing_annotation.txt",
+ "tests/extractor_outputs/good_partial_annotation.txt",
+ "tests/extractor_outputs/good_test_annotation.txt",
+ "tests/extractor_outputs/missing_annotation.txt",
+ "tests/extractor_outputs/no_annotation.txt",
+ "tests/git_list.txt",
+ "tests/gn_list_negative.txt",
+ "tests/gn_list_positive.txt",
+ "tests/irrelevant_file_content.cc",
+ "tests/irrelevant_file_content.mm",
+ "tests/irrelevant_file_name.txt",
+ "tests/relevant_file_name_and_content.cc",
+ "tests/relevant_file_name_and_content.mm",
+ ]
deps = [
":auditor_sources",
"//base/test:run_all_unittests",
diff --git a/chromium/tools/traffic_annotation/auditor/auditor_result.cc b/chromium/tools/traffic_annotation/auditor/auditor_result.cc
index b311d30b43e..99f88059c96 100644
--- a/chromium/tools/traffic_annotation/auditor/auditor_result.cc
+++ b/chromium/tools/traffic_annotation/auditor/auditor_result.cc
@@ -86,6 +86,13 @@ std::string AuditorResult::ToText() const {
"word and should be changed.",
details_[0].c_str(), file_path_.c_str(), line_);
+ case AuditorResult::Type::ERROR_DEPRECATED_UNIQUE_ID_HASH_CODE:
+ DCHECK(details_.size());
+ return base::StringPrintf(
+ "Unique id '%s' in '%s:%i' has a hash code similar to a deprecated "
+ "unique id and should be changed.",
+ details_[0].c_str(), file_path_.c_str(), line_);
+
case AuditorResult::Type::ERROR_DUPLICATE_UNIQUE_ID_HASH_CODE:
DCHECK_EQ(details_.size(), 2u);
return base::StringPrintf(
diff --git a/chromium/tools/traffic_annotation/auditor/auditor_result.h b/chromium/tools/traffic_annotation/auditor/auditor_result.h
index 702642b0b3a..fd95977da03 100644
--- a/chromium/tools/traffic_annotation/auditor/auditor_result.h
+++ b/chromium/tools/traffic_annotation/auditor/auditor_result.h
@@ -18,13 +18,15 @@ class AuditorResult {
// MISSING_TRAFFIC_ANNOTATION tag.
ERROR_NO_ANNOTATION, // A function is called with NO_ANNOTATION tag.
ERROR_SYNTAX, // Annotation syntax is not right.
- ERROR_RESERVED_UNIQUE_ID_HASH_CODE, // A unique id has a hash code similar
- // to a reserved word.
- ERROR_DUPLICATE_UNIQUE_ID_HASH_CODE, // Two unique ids have similar hash
- // codes.
- ERROR_UNIQUE_ID_INVALID_CHARACTER, // A unique id contanins a characer
- // which is not alphanumeric or
- // underline.
+ ERROR_RESERVED_UNIQUE_ID_HASH_CODE, // A unique id has a hash code similar
+ // to a reserved word.
+ ERROR_DEPRECATED_UNIQUE_ID_HASH_CODE, // A unique id has a hash code
+ // equal to a deprecated one.
+ ERROR_DUPLICATE_UNIQUE_ID_HASH_CODE, // Two unique ids have equal hash
+ // codes.
+ ERROR_UNIQUE_ID_INVALID_CHARACTER, // A unique id contanins a characer
+ // which is not alphanumeric or
+ // underline.
ERROR_MISSING_ANNOTATION, // A function that requires annotation is not
// annotated.
ERROR_MISSING_EXTRA_ID, // Annotation does not have a valid extra id.
diff --git a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.cc b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.cc
index 8bf0f74910e..11b6d9de440 100644
--- a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.cc
+++ b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.cc
@@ -4,6 +4,9 @@
#include "tools/traffic_annotation/auditor/traffic_annotation_auditor.h"
+#include <stdio.h>
+
+#include "base/files/file_enumerator.h"
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/process/launch.h"
@@ -11,9 +14,11 @@
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "net/traffic_annotation/network_traffic_annotation_test_helper.h"
+#include "tools/traffic_annotation/auditor/traffic_annotation_exporter.h"
#include "tools/traffic_annotation/auditor/traffic_annotation_file_filter.h"
namespace {
@@ -48,30 +53,38 @@ struct AnnotationID {
AnnotationInstance* instance;
};
-// Removes all occurances of a charcter from a string and returns the modified
-// string.
-std::string RemoveChar(const std::string& source, char removee) {
- std::string output;
- output.reserve(source.length());
- for (const char* current = source.data(); *current; current++) {
- if (*current != removee)
- output += *current;
- }
- return output;
-}
-
const std::string kBlockTypes[] = {"ASSIGNMENT", "ANNOTATION", "CALL"};
-const base::FilePath kSafeListPath(
- FILE_PATH_LITERAL("tools/traffic_annotation/auditor/safe_list.txt"));
+const base::FilePath kSafeListPath =
+ base::FilePath(FILE_PATH_LITERAL("tools"))
+ .Append(FILE_PATH_LITERAL("traffic_annotation"))
+ .Append(FILE_PATH_LITERAL("auditor"))
+ .Append(FILE_PATH_LITERAL("safe_list.txt"));
+
+// The folder that includes the latest Clang built-in library. Inside this
+// folder, there should be another folder with version number, like
+// '.../lib/clang/6.0.0', which would be passed to the clang tool.
+const base::FilePath kClangLibraryPath =
+ base::FilePath(FILE_PATH_LITERAL("third_party"))
+ .Append(FILE_PATH_LITERAL("llvm-build"))
+ .Append(FILE_PATH_LITERAL("Release+Asserts"))
+ .Append(FILE_PATH_LITERAL("lib"))
+ .Append(FILE_PATH_LITERAL("clang"));
+
} // namespace
TrafficAnnotationAuditor::TrafficAnnotationAuditor(
const base::FilePath& source_path,
- const base::FilePath& build_path)
+ const base::FilePath& build_path,
+ const base::FilePath& clang_tool_path)
: source_path_(source_path),
build_path_(build_path),
- safe_list_loaded_(false) {}
+ clang_tool_path_(clang_tool_path),
+ safe_list_loaded_(false) {
+ DCHECK(!source_path.empty());
+ DCHECK(!build_path.empty());
+ DCHECK(!clang_tool_path.empty());
+}
TrafficAnnotationAuditor::~TrafficAnnotationAuditor() {}
@@ -82,11 +95,19 @@ int TrafficAnnotationAuditor::ComputeHashValue(const std::string& unique_id) {
: -1;
}
+base::FilePath TrafficAnnotationAuditor::GetClangLibraryPath() {
+ return base::FileEnumerator(source_path_.Append(kClangLibraryPath), false,
+ base::FileEnumerator::DIRECTORIES)
+ .Next();
+}
+
bool TrafficAnnotationAuditor::RunClangTool(
const std::vector<std::string>& path_filters,
const bool full_run) {
if (!safe_list_loaded_ && !LoadSafeList())
return false;
+
+ // Create a file to pass options to clang scripts.
base::FilePath options_filepath;
if (!base::CreateTemporaryFile(&options_filepath)) {
LOG(ERROR) << "Could not create temporary options file.";
@@ -97,9 +118,17 @@ bool TrafficAnnotationAuditor::RunClangTool(
LOG(ERROR) << "Could not create temporary options file.";
return false;
}
- fprintf(options_file,
- "--generate-compdb --tool=traffic_annotation_extractor -p=%s ",
- build_path_.MaybeAsASCII().c_str());
+
+ // As the checked out clang tool may be in a directory different from the
+ // default one (third_party/llvm-buid/Release+Asserts/bin), its path and
+ // clang's library folder should be passed to the run_tool.py script.
+ fprintf(
+ options_file,
+ "--generate-compdb --tool=traffic_annotation_extractor -p=%s "
+ "--tool-path=%s --tool-args=--extra-arg=-resource-dir=%s ",
+ build_path_.MaybeAsASCII().c_str(),
+ base::MakeAbsoluteFilePath(clang_tool_path_).MaybeAsASCII().c_str(),
+ base::MakeAbsoluteFilePath(GetClangLibraryPath()).MaybeAsASCII().c_str());
// |safe_list_[ALL]| is not passed when |full_run| is happening as there is
// no way to pass it to run_tools.py except enumerating all alternatives.
@@ -135,10 +164,8 @@ bool TrafficAnnotationAuditor::RunClangTool(
}
base::CloseFile(options_file);
- base::CommandLine cmdline(source_path_.Append(FILE_PATH_LITERAL("tools"))
- .Append(FILE_PATH_LITERAL("clang"))
- .Append(FILE_PATH_LITERAL("scripts"))
- .Append(FILE_PATH_LITERAL("run_tool.py")));
+ base::CommandLine cmdline(source_path_.Append(
+ FILE_PATH_LITERAL("tools/clang/scripts/run_tool.py")));
#if defined(OS_WIN)
cmdline.PrependWrapper(L"python");
@@ -147,6 +174,7 @@ bool TrafficAnnotationAuditor::RunClangTool(
cmdline.AppendArg(base::StringPrintf(
"--options-file=%s", options_filepath.MaybeAsASCII().c_str()));
+ // Run, and clean after.
bool result = base::GetAppOutput(cmdline, &clang_tool_raw_output_);
base::DeleteFile(options_filepath, false);
@@ -178,11 +206,10 @@ bool TrafficAnnotationAuditor::ParseClangToolRawOutput() {
if (!safe_list_loaded_ && !LoadSafeList())
return false;
// Remove possible carriage return characters before splitting lines.
- // Not using base::RemoveChars as the input is ~47M and the implementation is
- // too slow for it.
- std::vector<std::string> lines =
- base::SplitString(RemoveChar(clang_tool_raw_output_, '\r'), "\n",
- base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ std::string temp_string;
+ base::RemoveChars(clang_tool_raw_output_, "\r", &temp_string);
+ std::vector<std::string> lines = base::SplitString(
+ temp_string, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
for (unsigned int current = 0; current < lines.size(); current++) {
// All blocks reported by clang tool start with '====', so we can ignore
// all lines that do not start with a '='.
@@ -325,11 +352,18 @@ void TrafficAnnotationAuditor::PurgeAnnotations(
extracted_annotations_.end());
}
-void TrafficAnnotationAuditor::CheckDuplicateHashes() {
+bool TrafficAnnotationAuditor::CheckDuplicateHashes() {
const std::map<int, std::string> reserved_ids = GetReservedUniqueIDs();
std::map<int, std::vector<AnnotationID>> collisions;
std::set<int> to_be_purged;
+ std::set<int> deprecated_ids;
+
+ // Load deprecated Hashcodes.
+ if (!TrafficAnnotationExporter(source_path_)
+ .GetDeprecatedHashCodes(&deprecated_ids)) {
+ return false;
+ }
for (AnnotationInstance& instance : extracted_annotations_) {
// Check if partial and branched completing annotation have an extra id
@@ -382,6 +416,16 @@ void TrafficAnnotationAuditor::CheckDuplicateHashes() {
continue;
}
+ // If the id's hash code was formerly used by a deprecated annotation,
+ // add an error.
+ if (base::ContainsKey(deprecated_ids, current.hash)) {
+ errors_.push_back(AuditorResult(
+ AuditorResult::Type::ERROR_DEPRECATED_UNIQUE_ID_HASH_CODE,
+ current.text, instance.proto.source().file(),
+ instance.proto.source().line()));
+ continue;
+ }
+
// Check for collisions.
if (!base::ContainsKey(collisions, current.hash)) {
collisions[current.hash] = std::vector<AnnotationID>();
@@ -425,6 +469,7 @@ void TrafficAnnotationAuditor::CheckDuplicateHashes() {
}
PurgeAnnotations(to_be_purged);
+ return true;
}
void TrafficAnnotationAuditor::CheckUniqueIDsFormat() {
@@ -608,10 +653,12 @@ void TrafficAnnotationAuditor::CheckAnnotationsContents() {
new_annotations.end());
}
-void TrafficAnnotationAuditor::RunAllChecks() {
- CheckDuplicateHashes();
+bool TrafficAnnotationAuditor::RunAllChecks() {
+ if (!CheckDuplicateHashes())
+ return false;
CheckUniqueIDsFormat();
CheckAnnotationsContents();
CheckAllRequiredFunctionsAreAnnotated();
+ return true;
} \ No newline at end of file
diff --git a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.h b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.h
index dc438e8d486..5d32bc6cab2 100644
--- a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.h
+++ b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor.h
@@ -40,8 +40,13 @@ struct AuditorException {
class TrafficAnnotationAuditor {
public:
+ // Creates an auditor object, storing the following paths:
+ // |source_path|: Path to the src directory.
+ // |build_path|: Path to a compiled build directory.
+ // |clang_tool_path|: Path to the 'traffic_annotation_extractor' clang tool.
TrafficAnnotationAuditor(const base::FilePath& source_path,
- const base::FilePath& build_path);
+ const base::FilePath& build_path,
+ const base::FilePath& clang_tool_path);
~TrafficAnnotationAuditor();
// Runs traffic_annotation_extractor clang tool and puts its output in
@@ -66,9 +71,10 @@ class TrafficAnnotationAuditor {
AuditorException::ExceptionType exception_type);
// Checks to see if any unique id or extra id or their hash code are
- // duplicated. Adds errors to |errors_| and purges annotations with duplicate
- // ids.
- void CheckDuplicateHashes();
+ // duplicated, either in currently existing annotations, or in deprecated
+ // ones. Adds errors to |errors_| and purges annotations with duplicate ids.
+ // Returns false if any errors happen while checking.
+ bool CheckDuplicateHashes();
// Checks to see if unique ids only include alphanumeric characters and
// underline. Adds errors to |errors_| and purges annotations with
@@ -89,7 +95,7 @@ class TrafficAnnotationAuditor {
bool CheckIfCallCanBeUnannotated(const CallInstance& call);
// Performs all checks on extracted annotations and calls.
- void RunAllChecks();
+ bool RunAllChecks();
// Returns a mapping of reserved unique ids' hash codes to the unique ids'
// texts. This list includes all unique ids that are defined in
@@ -135,9 +141,13 @@ class TrafficAnnotationAuditor {
gn_file_for_test_ = file_path;
}
+ // Returns the path to clang internal libraries.
+ base::FilePath GetClangLibraryPath();
+
private:
const base::FilePath source_path_;
const base::FilePath build_path_;
+ const base::FilePath clang_tool_path_;
std::string clang_tool_raw_output_;
std::vector<AnnotationInstance> extracted_annotations_;
diff --git a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_ui.cc b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_ui.cc
index 5b84a737ada..29c1afddc4c 100644
--- a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_ui.cc
+++ b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_ui.cc
@@ -19,7 +19,7 @@ namespace {
const char* HELP_TEXT = R"(
Traffic Annotation Auditor
Extracts network traffic annotaions from the repository, audits them for errors
-and coverage, and produces reports.
+and coverage, produces reports, and updates related files.
Usage: traffic_annotation_auditor [OPTION]... [path_filters]
@@ -32,6 +32,9 @@ Options:
--source-path Optional path to the src directory. If not provided and
build-path is available, assumed to be 'build-path/../..',
otherwise current directory.
+ --tool-path Optional path to traffic_annotation_extractor clang tool.
+ If not specified, it's assumed to be in the same path as
+ auditor's executable.
--extractor-output Optional path to the temporary file that extracted
annotations will be stored into.
--extracted-input Optional path to the file that temporary extracted
@@ -40,6 +43,12 @@ Options:
--full-run Optional flag asking the tool to run on the whole
repository without text filtering files. Using this flag
may increase processing time x40.
+ --test-only Optional flag to request just running tests and not
+ updating any file. If not specified,
+ 'tools/traffic_annotation/summary/annotations.xml' might
+ get updated and if it does, 'tools/traffic_annotation/
+ scripts/annotations_xml_downstream_updater.py will
+ be called to update downstream files.
--summary-file Optional path to the output file with all annotations.
--annotations-file Optional path to a TSV output file with all annotations.
path_filters Optional paths to filter what files the tool is run on.
@@ -48,11 +57,36 @@ Example:
traffic_annotation_auditor --build-dir=out/Debug summary-file=report.txt
)";
-const base::FilePath kAnnotationsXmlPath(
- FILE_PATH_LITERAL("tools/traffic_annotation/summary/annotations.xml"));
+const base::FilePath kDownstreamUpdater =
+ base::FilePath(FILE_PATH_LITERAL("tools"))
+ .Append(FILE_PATH_LITERAL("traffic_annotation"))
+ .Append(FILE_PATH_LITERAL("scripts"))
+ .Append(FILE_PATH_LITERAL("annotations_xml_downstream_caller.py"));
} // namespace
+// Calls |kDownstreamUpdater| script to update files that depend on
+// annotations.xml.
+bool RunAnnotationDownstreamUpdater(const base::FilePath& source_path) {
+ base::CommandLine cmdline(source_path.Append(kDownstreamUpdater));
+ int exit_code;
+
+#if defined(OS_WIN)
+ cmdline.PrependWrapper(L"python");
+ exit_code =
+ system(base::UTF16ToASCII(cmdline.GetCommandLineString()).c_str());
+#else
+ exit_code = system(cmdline.GetCommandLineString().c_str());
+#endif
+
+ if (exit_code) {
+ LOG(ERROR) << "Running " << kDownstreamUpdater.MaybeAsASCII()
+ << " failed with exit code: " << exit_code;
+ return false;
+ }
+ return true;
+}
+
// Writes a summary of annotations, calls, and errors.
bool WriteSummaryFile(const base::FilePath& filepath,
const std::vector<AnnotationInstance>& annotations,
@@ -257,11 +291,13 @@ int main(int argc, char* argv[]) {
base::FilePath build_path = command_line.GetSwitchValuePath("build-path");
base::FilePath source_path = command_line.GetSwitchValuePath("source-path");
+ base::FilePath tool_path = command_line.GetSwitchValuePath("tool-path");
base::FilePath extractor_output =
command_line.GetSwitchValuePath("extractor-output");
base::FilePath extractor_input =
command_line.GetSwitchValuePath("extractor-input");
bool full_run = command_line.HasSwitch("full-run");
+ bool test_only = command_line.HasSwitch("test-only");
base::FilePath summary_file = command_line.GetSwitchValuePath("summary-file");
base::FilePath annotations_file =
command_line.GetSwitchValuePath("annotations-file");
@@ -274,6 +310,11 @@ int main(int argc, char* argv[]) {
path_filters = command_line.GetArgs();
#endif
+ // If tool path is not specified, assume it is in the same path as this
+ // executable.
+ if (tool_path.empty())
+ tool_path = command_line.GetProgram().DirName();
+
// If source path is not provided, guess it using build path or current
// directory.
if (source_path.empty()) {
@@ -284,20 +325,21 @@ int main(int argc, char* argv[]) {
.Append(base::FilePath::kParentDirectory);
}
- TrafficAnnotationAuditor auditor(source_path, build_path);
+ // Get build directory, if it is empty issue an error.
+ if (build_path.empty()) {
+ LOG(ERROR)
+ << "You must specify a compiled build directory to run the auditor.\n";
+ return 1;
+ }
+
+ TrafficAnnotationAuditor auditor(source_path, build_path, tool_path);
// Extract annotations.
if (extractor_input.empty()) {
- // Get build directory, if it is empty issue an error.
- if (build_path.empty()) {
- LOG(ERROR)
- << "You must either specify the build directory to run the clang "
- "tool and extract annotations, or specify the input file where "
- "extracted annotations already exist.\n";
+ if (!auditor.RunClangTool(path_filters, full_run)) {
+ LOG(ERROR) << "Failed to run clang tool.";
return 1;
}
- if (!auditor.RunClangTool(path_filters, full_run))
- return 1;
// Write extractor output if requested.
if (!extractor_output.empty()) {
@@ -321,7 +363,10 @@ int main(int argc, char* argv[]) {
return 1;
// Perform checks.
- auditor.RunAllChecks();
+ if (!auditor.RunAllChecks()) {
+ LOG(ERROR) << "Running checks failed.";
+ return 1;
+ }
// Write the summary file.
if (!summary_file.empty() &&
@@ -331,15 +376,6 @@ int main(int argc, char* argv[]) {
return 1;
}
- // Update annotations list.
- if (!TrafficAnnotationExporter().UpdateAnnotationsXML(
- source_path.Append(kAnnotationsXmlPath),
- auditor.extracted_annotations(),
- TrafficAnnotationAuditor::GetReservedUniqueIDs())) {
- LOG(ERROR) << "Could not update annotations XML.";
- return 1;
- }
-
// Write annotations TSV file.
if (!annotations_file.empty() &&
!WriteAnnotationsFile(annotations_file,
@@ -348,6 +384,23 @@ int main(int argc, char* argv[]) {
return 1;
}
+ // Test/Update annotations.xml.
+ TrafficAnnotationExporter exporter(source_path);
+ if (!exporter.UpdateAnnotations(
+ auditor.extracted_annotations(),
+ TrafficAnnotationAuditor::GetReservedUniqueIDs())) {
+ return 1;
+ }
+ if (exporter.modified()) {
+ if (test_only) {
+      printf("Error: annotations.xml needs update.\n");
+ } else if (!exporter.SaveAnnotationsXML() ||
+ !RunAnnotationDownstreamUpdater(source_path)) {
+ LOG(ERROR) << "Could not update annotations XML or downstream files.";
+ return 1;
+ }
+ }
+
// Dump Errors and Warnings to stdout.
const std::vector<AuditorResult>& errors = auditor.errors();
for (const auto& error : errors) {
diff --git a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_unittest.cc b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_unittest.cc
index bca9d56eaf0..b9ef384775f 100644
--- a/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_unittest.cc
+++ b/chromium/tools/traffic_annotation/auditor/traffic_annotation_auditor_unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "tools/traffic_annotation/auditor/traffic_annotation_auditor.h"
+
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/memory/ptr_util.h"
@@ -11,9 +12,12 @@
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "net/traffic_annotation/network_traffic_annotation_test_helper.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "tools/traffic_annotation/auditor/traffic_annotation_exporter.h"
#include "tools/traffic_annotation/auditor/traffic_annotation_file_filter.h"
namespace {
@@ -31,7 +35,24 @@ const char* kIrrelevantFiles[] = {
const char* kRelevantFiles[] = {
"tools/traffic_annotation/auditor/tests/relevant_file_name_and_content.cc",
"tools/traffic_annotation/auditor/tests/relevant_file_name_and_content.mm"};
-}
+
+const base::FilePath kTestsFolder =
+ base::FilePath(FILE_PATH_LITERAL("tools"))
+ .Append(FILE_PATH_LITERAL("traffic_annotation"))
+ .Append(FILE_PATH_LITERAL("auditor"))
+ .Append(FILE_PATH_LITERAL("tests"));
+
+const base::FilePath kClangToolPath =
+ base::FilePath(FILE_PATH_LITERAL("tools"))
+ .Append(FILE_PATH_LITERAL("traffic_annotation/bin"));
+
+const base::FilePath kDownstreamUnittests =
+ base::FilePath(FILE_PATH_LITERAL("tools"))
+ .Append(FILE_PATH_LITERAL("traffic_annotation"))
+ .Append(FILE_PATH_LITERAL("scripts"))
+ .Append(FILE_PATH_LITERAL("annotations_xml_downstream_caller.py"));
+
+} // namespace
using namespace testing;
@@ -43,16 +64,31 @@ class TrafficAnnotationAuditorTest : public ::testing::Test {
return;
}
- tests_folder_ = source_path_.Append(FILE_PATH_LITERAL("tools"))
- .Append(FILE_PATH_LITERAL("traffic_annotation"))
- .Append(FILE_PATH_LITERAL("auditor"))
- .Append(FILE_PATH_LITERAL("tests"));
- auditor_ =
- base::MakeUnique<TrafficAnnotationAuditor>(source_path(), build_path());
+ tests_folder_ = source_path_.Append(kTestsFolder);
+
+#if defined(OS_WIN)
+ base::FilePath platform_name(FILE_PATH_LITERAL("win32"));
+#elif defined(OS_LINUX)
+ base::FilePath platform_name(FILE_PATH_LITERAL("linux64"));
+#elif defined(OS_MACOSX)
+ base::FilePath platform_name(FILE_PATH_LITERAL("mac"));
+#else
+ NOTREACHED() << "Unexpected platform.";
+#endif
+
+ base::FilePath clang_tool_path =
+ source_path_.Append(kClangToolPath).Append(platform_name);
+
+  // As a real build path is not available in tests, a default placeholder
+  // path (out/Default) is passed to the auditor.
+ auditor_ = std::make_unique<TrafficAnnotationAuditor>(
+ source_path_,
+ source_path_.Append(FILE_PATH_LITERAL("out"))
+ .Append(FILE_PATH_LITERAL("Default")),
+ clang_tool_path);
}
const base::FilePath source_path() const { return source_path_; }
- const base::FilePath build_path() const { return build_path_; }
const base::FilePath tests_folder() const { return tests_folder_; };
TrafficAnnotationAuditor& auditor() { return *auditor_; }
@@ -74,9 +110,6 @@ class TrafficAnnotationAuditorTest : public ::testing::Test {
private:
base::FilePath source_path_;
- base::FilePath build_path_; // Currently stays empty. Will be set if access
- // to a compiled build directory would be
- // granted.
base::FilePath tests_folder_;
std::unique_ptr<TrafficAnnotationAuditor> auditor_;
};
@@ -88,7 +121,8 @@ AuditorResult::Type TrafficAnnotationAuditorTest::Deserialize(
EXPECT_TRUE(base::ReadFileToString(
tests_folder_.Append(FILE_PATH_LITERAL("extractor_outputs"))
.AppendASCII(file_name),
- &file_content));
+ &file_content))
+ << file_name;
base::RemoveChars(file_content, "\r", &file_content);
std::vector<std::string> lines = base::SplitString(
file_content, "\n", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
@@ -818,4 +852,34 @@ TEST_F(TrafficAnnotationAuditorTest, CreateCompleteAnnotation) {
NetworkTrafficAnnotation_TrafficSemantics_Destination_LOCAL);
EXPECT_NE(instance.CreateCompleteAnnotation(other, &combination).type(),
AuditorResult::Type::RESULT_OK);
-} \ No newline at end of file
+}
+
+// Tests if Annotations.xml has proper content.
+TEST_F(TrafficAnnotationAuditorTest, AnnotationsXML) {
+ TrafficAnnotationExporter exporter(source_path());
+
+ EXPECT_TRUE(exporter.LoadAnnotationsXML());
+ EXPECT_TRUE(exporter.CheckReportItems());
+}
+
+// Tests if downstream files depending on Annotations.xml are updated.
+TEST_F(TrafficAnnotationAuditorTest, AnnotationsDownstreamUnittests) {
+ base::CommandLine cmdline(source_path().Append(kDownstreamUnittests));
+ cmdline.AppendSwitch("test");
+
+ int tests_result;
+#if defined(OS_WIN)
+ cmdline.PrependWrapper(L"python");
+ tests_result =
+ system(base::UTF16ToASCII(cmdline.GetCommandLineString()).c_str());
+#else
+ tests_result = system(cmdline.GetCommandLineString().c_str());
+#endif
+ EXPECT_EQ(0, tests_result);
+}
+
+// Tests if AnnotationInstance::GetClangLibraryPath finds a path.
+TEST_F(TrafficAnnotationAuditorTest, GetClangLibraryPath) {
+ base::FilePath clang_library = auditor().GetClangLibraryPath();
+ EXPECT_FALSE(clang_library.empty());
+}
diff --git a/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.cc b/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.cc
index a670c2dce02..91ffeabbf2e 100644
--- a/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.cc
+++ b/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.cc
@@ -9,8 +9,10 @@
#include "base/files/file_util.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
+#include "build/build_config.h"
#include "third_party/libxml/chromium/libxml_utils.h"
#include "third_party/protobuf/src/google/protobuf/text_format.h"
#include "tools/traffic_annotation/auditor/traffic_annotation_auditor.h"
@@ -25,112 +27,186 @@ const char* kXmlComment =
"\nRefer to README.md for content description and update process.\n"
"-->\n\n";
+const base::FilePath kAnnotationsXmlPath =
+ base::FilePath(FILE_PATH_LITERAL("tools"))
+ .Append(FILE_PATH_LITERAL("traffic_annotation"))
+ .Append(FILE_PATH_LITERAL("summary"))
+ .Append(FILE_PATH_LITERAL("annotations.xml"));
+
} // namespace
-// Loads annotations from the given XML file.
-bool TrafficAnnotationExporter::LoadAnnotationsFromXML(
- const base::FilePath& filepath,
- std::vector<ReportItem>* items) {
+TrafficAnnotationExporter::ReportItem::ReportItem()
+ : unique_id_hash_code(-1), content_hash_code(-1) {}
+
+TrafficAnnotationExporter::ReportItem::ReportItem(
+ const TrafficAnnotationExporter::ReportItem& other)
+ : unique_id_hash_code(other.unique_id_hash_code),
+ deprecation_date(other.deprecation_date),
+ content_hash_code(other.content_hash_code),
+ os_list(other.os_list) {}
+
+TrafficAnnotationExporter::ReportItem::~ReportItem() {}
+
+TrafficAnnotationExporter::TrafficAnnotationExporter(
+ const base::FilePath& source_path)
+ : source_path_(source_path), modified_(false) {}
+
+TrafficAnnotationExporter::~TrafficAnnotationExporter() {}
+
+bool TrafficAnnotationExporter::LoadAnnotationsXML() {
+ report_items_.clear();
XmlReader reader;
- if (!reader.LoadFile(filepath.MaybeAsASCII())) {
- LOG(ERROR) << "Could not open former annotations list.";
+ if (!reader.LoadFile(
+ source_path_.Append(kAnnotationsXmlPath).MaybeAsASCII())) {
+ LOG(ERROR) << "Could not load '"
+ << source_path_.Append(kAnnotationsXmlPath).MaybeAsASCII()
+ << "'.";
return false;
}
- bool all_ok = true;
+ bool all_ok = false;
while (reader.Read()) {
+ all_ok = true;
if (reader.NodeName() != "item")
continue;
ReportItem item;
std::string temp;
+ std::string unique_id;
- all_ok &= reader.NodeAttribute("id", &item.unique_id);
+ all_ok &= reader.NodeAttribute("id", &unique_id);
all_ok &= reader.NodeAttribute("hash_code", &temp) &&
base::StringToInt(temp, &item.unique_id_hash_code);
-
if (all_ok && reader.NodeAttribute("content_hash_code", &temp))
all_ok &= base::StringToInt(temp, &item.content_hash_code);
else
item.content_hash_code = -1;
- if (!reader.NodeAttribute("deprecated", &item.deprecation_date))
- item.deprecation_date = "";
+ reader.NodeAttribute("deprecated", &item.deprecation_date);
+
+ if (reader.NodeAttribute("os_list", &temp)) {
+ item.os_list = base::SplitString(temp, ",", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ }
if (!all_ok) {
- LOG(ERROR) << "Unexpected format in former annotations list.";
- return false;
+ LOG(ERROR) << "Unexpected format in annotations.xml.";
+ break;
}
- items->push_back(item);
+ report_items_.insert(std::make_pair(unique_id, item));
}
- return true;
+ modified_ = false;
+ return all_ok;
}
-bool TrafficAnnotationExporter::UpdateAnnotationsXML(
- const base::FilePath& filepath,
+bool TrafficAnnotationExporter::UpdateAnnotations(
const std::vector<AnnotationInstance>& annotations,
const std::map<int, std::string>& reserved_ids) {
- std::vector<ReportItem> items;
- std::set<int> used_hash_codes;
+ std::string platform;
+#if defined(OS_LINUX)
+ platform = "linux";
+#elif defined(OS_WIN)
+ platform = "windows";
+#else
+ NOTREACHED() << "Other platforms are not supported yet.";
+#endif
- // Add annotations.
- for (AnnotationInstance item : annotations) {
- std::string content;
+ if (report_items_.empty() && !LoadAnnotationsXML())
+ return false;
+
+ std::set<int> current_platform_hashcodes;
+
+ // Iterate current annotations and add/update.
+ for (AnnotationInstance annotation : annotations) {
// Source tag is not used in computing the hashcode, as we don't need
// sensitivity to changes in source location (filepath, line number,
// and function).
- item.proto.clear_source();
- google::protobuf::TextFormat::PrintToString(item.proto, &content);
+ std::string content;
+ annotation.proto.clear_source();
+ google::protobuf::TextFormat::PrintToString(annotation.proto, &content);
+ int content_hash_code = TrafficAnnotationAuditor::ComputeHashValue(content);
- items.push_back(
- ReportItem(item.proto.unique_id(), item.unique_id_hash_code,
- TrafficAnnotationAuditor::ComputeHashValue(content)));
- used_hash_codes.insert(item.unique_id_hash_code);
+ if (base::ContainsKey(report_items_, annotation.proto.unique_id())) {
+ ReportItem* current = &report_items_[annotation.proto.unique_id()];
+ if (!base::ContainsValue(current->os_list, platform)) {
+ current->os_list.push_back(platform);
+ modified_ = true;
+ }
+ } else {
+ ReportItem new_item;
+ new_item.unique_id_hash_code = annotation.unique_id_hash_code;
+ new_item.content_hash_code = content_hash_code;
+ new_item.os_list.push_back(platform);
+ report_items_[annotation.proto.unique_id()] = new_item;
+ modified_ = true;
+ }
+ current_platform_hashcodes.insert(annotation.unique_id_hash_code);
}
- // Add reserved ids.
- for (const auto& item : reserved_ids) {
- items.push_back(ReportItem(item.second, item.first));
- used_hash_codes.insert(item.first);
+  // If a non-reserved annotation is removed from current platform, update it.
+ for (auto& item : report_items_) {
+ if (base::ContainsValue(item.second.os_list, platform) &&
+ item.second.content_hash_code != -1 &&
+ !base::ContainsKey(current_platform_hashcodes,
+ item.second.unique_id_hash_code)) {
+ base::Erase(item.second.os_list, platform);
+ modified_ = true;
+ }
}
- // Add deprecated items
- std::vector<ReportItem> former_items;
- if (!LoadAnnotationsFromXML(filepath, &former_items))
- return false;
+ // If there is a new reserved id, add it.
+ for (const auto& item : reserved_ids) {
+ if (!base::ContainsKey(report_items_, item.second)) {
+ ReportItem new_item;
+ new_item.unique_id_hash_code = item.first;
+ new_item.os_list.push_back("all");
+ report_items_[item.second] = new_item;
+ modified_ = true;
+ }
+ }
- for (ReportItem& item : former_items) {
- if (!base::ContainsKey(used_hash_codes, item.unique_id_hash_code)) {
+ // If there are annotations that are not used in any OS, set the deprecation
+ // flag.
+ for (auto& item : report_items_) {
+ if (item.second.os_list.empty() && item.second.deprecation_date.empty()) {
base::Time::Exploded now;
base::Time::Now().UTCExplode(&now);
- if (item.deprecation_date.empty())
- item.deprecation_date = base::StringPrintf("%i-%02i-%02i", now.year,
- now.month, now.day_of_month);
- items.push_back(item);
+ item.second.deprecation_date = base::StringPrintf(
+ "%i-%02i-%02i", now.year, now.month, now.day_of_month);
+ modified_ = true;
}
}
- // Sort and write.
- std::sort(items.begin(), items.end(), ReportItem::Compare);
+ return CheckReportItems();
+}
+bool TrafficAnnotationExporter::SaveAnnotationsXML() {
XmlWriter writer;
writer.StartWriting();
writer.StartElement("annotations");
- for (const ReportItem& item : items) {
+ for (const auto& item : report_items_) {
writer.StartElement("item");
- writer.AddAttribute("id", item.unique_id);
- writer.AddAttribute("hash_code",
- base::StringPrintf("%i", item.unique_id_hash_code));
- if (!item.deprecation_date.empty())
- writer.AddAttribute("deprecated", item.deprecation_date);
- if (item.content_hash_code == -1)
+ writer.AddAttribute("id", item.first);
+ writer.AddAttribute(
+ "hash_code", base::StringPrintf("%i", item.second.unique_id_hash_code));
+ if (!item.second.deprecation_date.empty())
+ writer.AddAttribute("deprecated", item.second.deprecation_date);
+ if (item.second.content_hash_code == -1)
writer.AddAttribute("reserved", "1");
else
- writer.AddAttribute("content_hash_code",
- base::StringPrintf("%i", item.content_hash_code));
+ writer.AddAttribute(
+ "content_hash_code",
+ base::StringPrintf("%i", item.second.content_hash_code));
+ std::string os_list;
+ for (const std::string& platform : item.second.os_list)
+ os_list += platform + ",";
+ if (!os_list.empty()) {
+ os_list.pop_back();
+ writer.AddAttribute("os_list", os_list);
+ }
writer.EndElement();
}
writer.EndElement();
@@ -140,6 +216,43 @@ bool TrafficAnnotationExporter::UpdateAnnotationsXML(
// Add comment before annotation tag (and after xml version).
xml_content.insert(xml_content.find("<annotations>"), kXmlComment);
- return base::WriteFile(filepath, xml_content.c_str(), xml_content.length()) !=
- -1;
+ return base::WriteFile(source_path_.Append(kAnnotationsXmlPath),
+ xml_content.c_str(), xml_content.length()) != -1;
+}
+
+bool TrafficAnnotationExporter::GetDeprecatedHashCodes(
+ std::set<int>* hash_codes) {
+ if (report_items_.empty() && !LoadAnnotationsXML())
+ return false;
+
+ hash_codes->clear();
+ for (const auto& item : report_items_) {
+ if (!item.second.deprecation_date.empty())
+ hash_codes->insert(item.second.unique_id_hash_code);
+ }
+ return true;
+}
+
+bool TrafficAnnotationExporter::CheckReportItems() {
+ // Check for annotation hash code duplications.
+ std::set<int> used_codes;
+ for (auto& item : report_items_) {
+ if (base::ContainsKey(used_codes, item.second.unique_id_hash_code)) {
+ LOG(ERROR) << "Unique id hash code " << item.second.unique_id_hash_code
+ << " is used more than once.";
+ return false;
+ } else {
+ used_codes.insert(item.second.unique_id_hash_code);
+ }
+ }
+
+ // Check for coexistence of OS(es) and deprecation date.
+ for (auto& item : report_items_) {
+ if (!item.second.deprecation_date.empty() && !item.second.os_list.empty()) {
+ LOG(ERROR) << "Annotation " << item.first
+ << " has a deprecation date and at least one active OS.";
+ return false;
+ }
+ }
+ return true;
} \ No newline at end of file
diff --git a/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.h b/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.h
index 0c0328850ac..6717cf997c4 100644
--- a/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.h
+++ b/chromium/tools/traffic_annotation/auditor/traffic_annotation_exporter.h
@@ -6,48 +6,54 @@
#define TOOLS_TRAFFIC_ANNOTATION_AUDITOR_TRAFFIC_ANNOTATION_EXPORTER_H_
#include <map>
+#include <set>
#include <vector>
#include "base/files/file_path.h"
-#include "base/strings/string_util.h"
#include "tools/traffic_annotation/auditor/instance.h"
class TrafficAnnotationExporter {
public:
- TrafficAnnotationExporter() = default;
- ~TrafficAnnotationExporter() = default;
+ TrafficAnnotationExporter(const base::FilePath& source_path);
+ ~TrafficAnnotationExporter();
TrafficAnnotationExporter(const TrafficAnnotationExporter&) = delete;
TrafficAnnotationExporter(TrafficAnnotationExporter&&) = delete;
- // Updates the xml file including annotations unique id, hash code, content
- // hash code, and a flag specifying that annotation is depricated.
- bool UpdateAnnotationsXML(const base::FilePath& filepath,
- const std::vector<AnnotationInstance>& annotations,
- const std::map<int, std::string>& reserved_ids);
+ // Loads annotations from annotations.xml file into |report_items_|.
+ bool LoadAnnotationsXML();
+
+ // Updates |report_items_| with current set of extracted annotations and
+ // reserved ids. Sets the |modified_| flag if any item is updated.
+ bool UpdateAnnotations(const std::vector<AnnotationInstance>& annotations,
+ const std::map<int, std::string>& reserved_ids);
+
+ // Saves |report_items_| into annotations.xml.
+ bool SaveAnnotationsXML();
+
+ // Produces the list of deprecated hash codes. Returns false if
+ // annotations.xml is not and cannot be loaded.
+ bool GetDeprecatedHashCodes(std::set<int>* hash_codes);
+
+ bool modified() { return modified_; }
+
+ // Runs tests on content of |report_items_|.
+ bool CheckReportItems();
private:
struct ReportItem {
- ReportItem(std::string id, int hash_code, int content_hash)
- : unique_id(id),
- unique_id_hash_code(hash_code),
- deprecation_date(std::string()),
- content_hash_code(content_hash) {}
- ReportItem(std::string id, int hash_code) : ReportItem(id, hash_code, -1) {}
- ReportItem() : ReportItem(std::string(), -1, -1) {}
-
- static bool Compare(const ReportItem& a, const ReportItem& b) {
- return base::CompareCaseInsensitiveASCII(a.unique_id, b.unique_id) < 0;
- }
-
- std::string unique_id;
+ ReportItem();
+ ReportItem(const ReportItem& other);
+ ~ReportItem();
+
int unique_id_hash_code;
std::string deprecation_date;
int content_hash_code;
+ std::vector<std::string> os_list;
};
- // Loads annotations from the given XML file.
- bool LoadAnnotationsFromXML(const base::FilePath& filepath,
- std::vector<ReportItem>* items);
+ std::map<std::string, ReportItem> report_items_;
+ const base::FilePath source_path_;
+ bool modified_;
};
#endif // TOOLS_TRAFFIC_ANNOTATION_AUDITOR_TRAFFIC_ANNOTATION_EXPORTER_H_ \ No newline at end of file
diff --git a/chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_auditor.sha1 b/chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_auditor.sha1
new file mode 100644
index 00000000000..e569316bad5
--- /dev/null
+++ b/chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_auditor.sha1
@@ -0,0 +1 @@
+66999f5ba191e932d8b1a49012721bd1005c0876 \ No newline at end of file
diff --git a/chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_extractor.sha1 b/chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_extractor.sha1
new file mode 100644
index 00000000000..4d2c120c2e9
--- /dev/null
+++ b/chromium/tools/traffic_annotation/bin/linux64/traffic_annotation_extractor.sha1
@@ -0,0 +1 @@
+28f4eeb872ef8c038002ddab41dad30777deab4d \ No newline at end of file
diff --git a/chromium/tools/traffic_annotation/sample_traffic_annotation.cc b/chromium/tools/traffic_annotation/sample_traffic_annotation.cc
index 6e53fa056cf..361059e87e7 100644
--- a/chromium/tools/traffic_annotation/sample_traffic_annotation.cc
+++ b/chromium/tools/traffic_annotation/sample_traffic_annotation.cc
@@ -29,12 +29,13 @@ void network_traffic_annotation_template() {
setting: "..."
chrome_policy {
[POLICY_NAME] {
- policy_options {mode: MANDATORY/RECOMMENDED/UNSET}
[POLICY_NAME]: ...
}
}
policy_exception_justification = "..."
- })");
+ }
+ comments: "..."
+ )");
}
// An example on one level traffic annotation.
@@ -65,7 +66,6 @@ void network_traffic_annotation_sample() {
"Advanced. The feature is disabled by default."
chrome_policy {
SpellCheckServiceEnabled {
- policy_options {mode: MANDATORY}
SpellCheckServiceEnabled: false
}
}
@@ -107,7 +107,6 @@ void PrefetchImage1(const GURL& url) {
"feature is enabled by default."
chrome_policy {
SearchSuggestEnabled {
- policy_options {mode: MANDATORY}
SearchSuggestEnabled: false
}
}
@@ -141,7 +140,6 @@ void PrefetchImage2(const GURL& url) {
"'Passwords and forms'). There is no setting to disable the API."
chrome_policy {
PasswordManagerEnabled {
- policy_options {mode: MANDATORY}
PasswordManagerEnabled: false
}
}
@@ -232,7 +230,6 @@ void UploadLog(const bool& uma_service_type) {
"feature is enabled by default."
chrome_policy {
MetricsReportingEnabled {
- policy_options {mode: MANDATORY}
MetricsReportingEnabled: false
}
}
diff --git a/chromium/tools/traffic_annotation/scripts/README.md b/chromium/tools/traffic_annotation/scripts/README.md
new file mode 100644
index 00000000000..8124ddf497b
--- /dev/null
+++ b/chromium/tools/traffic_annotation/scripts/README.md
@@ -0,0 +1,7 @@
+# Traffic Annotation Scripts
+This file describes the scripts in `tools/traffic_annotation/scripts`
+
+# annotations_xml_downstream_caller.py
+This script calls all scripts that either update a file based on
+`annotations.xml`, or test if a file is in sync with it. Call with `test` switch
+for test mode.
diff --git a/chromium/tools/traffic_annotation/scripts/annotations_xml_downstream_caller.py b/chromium/tools/traffic_annotation/scripts/annotations_xml_downstream_caller.py
new file mode 100755
index 00000000000..ccf64b9f8b0
--- /dev/null
+++ b/chromium/tools/traffic_annotation/scripts/annotations_xml_downstream_caller.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Runs all scripts that use
+ 'tools/traffic_annotation/summary/annotations.xml' to update a file, or
+ test if a file is in sync with it. Use with 'test' switch for test mode.
+ Add your scripts to PROD_SCRIPTS or TEST_SCRIPTS.
+"""
+
+import os.path
+import subprocess
+import sys
+
+# Add your update scripts here. Each list item will have the script name and the
+# list of arguments.
+PROD_SCRIPTS = [
+ ["tools/metrics/histograms/update_traffic_annotation_histograms.py", []]]
+
+# Add your test scripts here. Each list item will have the script name and the
+# list of arguments.
+TEST_SCRIPTS = []
+
+
+def main(test_mode):
+ src_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+
+ for script in TEST_SCRIPTS if test_mode else PROD_SCRIPTS:
+ args = [os.path.join(src_path, script[0])]
+ args += script[1]
+ if sys.platform == "win32":
+ args.insert(0, "python")
+
+ result = subprocess.call(args)
+ if result:
+      if test_mode:
+        sys.stderr.write("Running '%s' script failed with error code: %i.\n"
+                         % (script, result))
+ return result
+ return 0
+
+
+if __name__ == "__main__":
+ test_mode = (len(sys.argv) > 1 and "test" in sys.argv[1])
+ sys.exit(main(test_mode))
diff --git a/chromium/tools/traffic_annotation/scripts/check_annotations.py b/chromium/tools/traffic_annotation/scripts/check_annotations.py
new file mode 100755
index 00000000000..8dfa3515819
--- /dev/null
+++ b/chromium/tools/traffic_annotation/scripts/check_annotations.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs traffic_annotation_auditor on the given change list or all files to make
+sure network traffic annotations are syntactically and semantically correct and
+all required functions are annotated.
+"""
+
+import os
+import argparse
+import subprocess
+import sys
+
+
+
+
+class NetworkTrafficAnnotationChecker():
+ EXTENSIONS = ['.cc', '.mm',]
+ COULD_NOT_RUN_MESSAGE = \
+ 'Network traffic annotation presubmit check was not performed. To run ' \
+ 'it, a compiled build directory and traffic_annotation_auditor binary ' \
+ 'are required.'
+
+ def __init__(self, build_path=None):
+ """Initializes a NetworkTrafficAnnotationChecker object.
+
+ Args:
+ build_path: str Absolute or relative path to a fully compiled build
+ directory. If not specified, the script tries to find it based on
+ relative position of this file (src/tools/traffic_annotation).
+ """
+ self.this_dir = os.path.dirname(os.path.abspath(__file__))
+
+ if not build_path:
+ build_path = self._FindPossibleBuildPath()
+ if build_path:
+ self.build_path = os.path.abspath(build_path)
+
+ self.auditor_path = None
+ platform = {
+ 'linux2': 'linux64',
+ 'darwin': 'mac',
+ 'win32': 'win32',
+ }[sys.platform]
+ path = os.path.join(self.this_dir, 'bin', platform,
+ 'traffic_annotation_auditor')
+ if sys.platform == 'win32':
+ path += '.exe'
+ if os.path.exists(path):
+ self.auditor_path = path
+
+ def _FindPossibleBuildPath(self):
+ """Returns the first folder in //out that looks like a build dir."""
+ out = os.path.abspath(os.path.join(self.this_dir, '..', '..', 'out'))
+ if os.path.exists(out):
+ for folder in os.listdir(out):
+ candidate = os.path.join(out, folder)
+ if (os.path.isdir(candidate) and
+ self._CheckIfDirectorySeemsAsBuild(candidate)):
+ return candidate
+ return None
+
+ def _CheckIfDirectorySeemsAsBuild(self, path):
+ """Checks to see if a directory seems to be a compiled build directory by
+ searching for 'gen' folder and 'build.ninja' file in it.
+ """
+ return all(os.path.exists(
+ os.path.join(path, item)) for item in ('gen', 'build.ninja'))
+
+ def _AllArgsValid(self):
+ return self.auditor_path and self.build_path
+
+ def ShouldCheckFile(self, file_path):
+ """Returns true if the input file has an extension relevant to network
+ traffic annotations."""
+ return os.path.splitext(file_path)[1] in self.EXTENSIONS
+
+ def CheckFiles(self, file_paths=None, limit=0):
+ """Passes all given files to traffic_annotation_auditor to be checked for
+ possible violations of network traffic annotation rules.
+
+ Args:
+ file_paths: list of str List of files to check. If empty, the whole
+ repository will be checked.
+ limit: int Sets the upper threshold for number of errors and warnings,
+ use 0 for unlimited.
+
+ Returns:
+ warnings: list of str List of all issued warnings.
+ errors: list of str List of all issued errors.
+ """
+
+ # If for some reason the network traffic annotations become incompatible
+ # with the current version of clang, and this test starts failing,
+ # please set test_is_enabled to "False" and file a bug to get this
+ # reenabled, and cc the people listed in //tools/traffic_annotation/OWNERS.
+ # TODO(rhalavati): Actually enable the check.
+ test_is_enabled = False
+ if not test_is_enabled:
+ return [], []
+
+ if not self.build_path:
+ return [self.COULD_NOT_RUN_MESSAGE], []
+
+ if file_paths:
+ file_paths = [
+ file_path for file_path in file_paths if self.ShouldCheckFile(
+ file_path)]
+
+ if not file_paths:
+ return [], []
+ else:
+ file_paths = []
+
+ args = [self.auditor_path, "-build-path=" + self.build_path] + file_paths
+
+ if sys.platform.startswith("win"):
+ args.insert(0, sys.executable)
+
+ command = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout_text, stderr_text = command.communicate()
+
+ errors = []
+ warnings = []
+
+ if stderr_text:
+ warnings.append(
+ "Could not run network traffic annotation presubmit check. Returned "
+ "error from traffic_annotation_auditor: %s" % stderr_text)
+
+ for line in stdout_text.splitlines():
+ if line.startswith('Error: '):
+ errors.append(line[7:])
+ elif line.startswith('Warning: '):
+ warnings.append(line[9:])
+ if limit:
+ if len(errors) > limit:
+ errors = errors[:limit]
+ if len(warnings) + len(errors) > limit:
+ warnings = warnings[:limit-len(errors)]
+ return warnings, errors
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Traffic Annotation Auditor Presubmit checker.")
+ parser.add_argument(
+ '--build-path',
+ help='Specifies a compiled build directory, e.g. out/Debug. If not '
+ 'specified, the script tries to guess it. Will not proceed if not '
+ 'found.')
+ parser.add_argument(
+ '--limit', default=5,
+ help='Limit for the maximum number of returned errors and warnings. '
+ 'Default value is 5, use 0 for unlimited.')
+ args = parser.parse_args()
+
+ checker = NetworkTrafficAnnotationChecker(args.build_path)
+
+ warnings, errors = checker.CheckFiles(limit=args.limit)
+ if warnings:
+ print("Warnings:\n\t%s" % "\n\t".join(warnings))
+ if errors:
+ print("Errors:\n\t%s" % "\n\t".join(errors))
+
+ return 0
+
+
+if '__main__' == __name__:
+ sys.exit(main())
diff --git a/chromium/tools/traffic_annotation/summary/README.md b/chromium/tools/traffic_annotation/summary/README.md
index 73d6af16c10..9fa21ff76af 100644
--- a/chromium/tools/traffic_annotation/summary/README.md
+++ b/chromium/tools/traffic_annotation/summary/README.md
@@ -5,16 +5,16 @@ This file describes the `tools/traffic_annotation/summary/annotations.xml`.
`annotations.xml` includes the summary of all network traffic annotations in
Chromium repository. The content includes complete annotations and the merged
partial and completing (and branched completing) annotations.
-For each annotation, unique id, hash code of unique id, and hash code of content
-is presented. If annotation is a reserved one, instead of content hash code, a
-`reserved` attribute is included.
+For each annotation, unique id, hash code of unique id, hash code of the
+content, and the list of OSes using this annotation is presented. If annotation
+is a reserved one, instead of content hash code, a `reserved` attribute is
+included.
Once an annotation is removed from the repository, a `deprecated` attribute is
added to its item in this file, with value equal to the deprecation date.
These items can be manually or automatically pruned after sufficient time.
# How to Generate/Update.
Run `traffic_annotation_auditor` to check for annotations correctness and
-automatic update.
-After each change in `annotations.xml`, please call
-`tools/metrics/histograms/update_traffic_annotation_histograms.py` to update
-annotation enums used in histograms. \ No newline at end of file
+automatic update. After each modification of `annotations.xml`, auditor calls
+`tools/traffic_annotation/scripts/annotations_xml_downstream_caller.py` to
+update all users of this file.
diff --git a/chromium/tools/traffic_annotation/summary/annotations.xml b/chromium/tools/traffic_annotation/summary/annotations.xml
index 0a3c1437b8d..68609c0804c 100644
--- a/chromium/tools/traffic_annotation/summary/annotations.xml
+++ b/chromium/tools/traffic_annotation/summary/annotations.xml
@@ -8,197 +8,207 @@ Refer to README.md for content description and update process.
-->
<annotations>
- <item id="affiliation_lookup" hash_code="111904019" content_hash_code="81061452"/>
- <item id="appcache_update_job" hash_code="25790702" content_hash_code="27424887"/>
- <item id="asset_links" hash_code="89771989" content_hash_code="72216357"/>
- <item id="autofill_query" hash_code="88863520" content_hash_code="15563339"/>
- <item id="autofill_upload" hash_code="104798869" content_hash_code="110634763"/>
- <item id="background_fetch_context" hash_code="16469669" content_hash_code="52235434"/>
- <item id="background_performance_tracer" hash_code="84575287" content_hash_code="120154250"/>
- <item id="blink_resource_loader" hash_code="101845102" content_hash_code="69137084"/>
- <item id="blob_read" hash_code="112303907" content_hash_code="135449692"/>
- <item id="blob_reader" hash_code="5154306" content_hash_code="39702178"/>
- <item id="brandcode_config" hash_code="109679553" content_hash_code="128843792"/>
- <item id="captive_portal_service" hash_code="88754904" content_hash_code="70737580"/>
- <item id="certificate_verifier" hash_code="113553577" content_hash_code="62346354"/>
- <item id="chrome_expect_ct_reporter" hash_code="57276415" content_hash_code="137551346"/>
- <item id="chrome_feedback_report_app" hash_code="134729048" content_hash_code="73916972"/>
- <item id="chrome_variations_service" hash_code="115188287" content_hash_code="32485683"/>
- <item id="client_download_request" hash_code="125522256" content_hash_code="23897505"/>
- <item id="cloud_print_backend" hash_code="71578042" content_hash_code="89544098"/>
- <item id="cloud_print_privet_register" hash_code="24978481" content_hash_code="131359002"/>
- <item id="cloud_print_proxy" hash_code="50859288" content_hash_code="25432720"/>
- <item id="cloud_print_search" hash_code="132055347" content_hash_code="123783474"/>
- <item id="component_updater_utils" hash_code="125596241" content_hash_code="12311195"/>
- <item id="content_hash_verification_job" hash_code="64733114" content_hash_code="127912411"/>
- <item id="content_suggestion_get_favicon" hash_code="16653985" content_hash_code="134280933"/>
- <item id="CRD_ice_config_request" hash_code="49825319" content_hash_code="8740825"/>
- <item id="CRD_relay_session_request" hash_code="24058523" content_hash_code="36997811"/>
- <item id="CRD_telemetry_log" hash_code="18670926" content_hash_code="49025478"/>
- <item id="credenential_avatar" hash_code="53695122" content_hash_code="113035371"/>
- <item id="cros_recovery_image_download" hash_code="101725581" content_hash_code="23088027"/>
- <item id="cryptauth_device_sync_tickle" hash_code="96565489" content_hash_code="111962899"/>
- <item id="cryptauth_enrollment_flow_finish" hash_code="54836939" content_hash_code="122011288"/>
- <item id="cryptauth_enrollment_flow_setup" hash_code="84889397" content_hash_code="115692637"/>
- <item id="cryptauth_find_eligible_for_promotion" hash_code="20053290" content_hash_code="98613425"/>
- <item id="cryptauth_find_eligible_unlock_devices" hash_code="120000562" content_hash_code="124093652"/>
- <item id="cryptauth_get_my_devices" hash_code="136498680" content_hash_code="83420739"/>
- <item id="cryptauth_toggle_easyunlock" hash_code="25204343" content_hash_code="5092035"/>
- <item id="data_reduction_proxy_config" hash_code="485305" content_hash_code="134075813"/>
- <item id="data_reduction_proxy_pingback" hash_code="68561428" content_hash_code="78407792"/>
- <item id="data_reduction_proxy_secure_proxy_check" hash_code="131236802" content_hash_code="122297136"/>
- <item id="data_reduction_proxy_warmup" hash_code="8250451" content_hash_code="6321249"/>
- <item id="device_geolocation_request" hash_code="77673751" content_hash_code="97181773"/>
- <item id="device_management_service" hash_code="117782019" content_hash_code="127535409"/>
- <item id="devtools_free_data_source" hash_code="22774132" content_hash_code="136324050"/>
- <item id="devtools_handle_front_end_messages" hash_code="135636011" content_hash_code="18422190"/>
- <item id="devtools_hard_coded_data_source" hash_code="111565057" content_hash_code="46074423"/>
- <item id="devtools_interceptor" hash_code="98123737" content_hash_code="19053470"/>
- <item id="devtools_network_resource" hash_code="129652775" content_hash_code="24059212"/>
- <item id="dial_get_device_description" hash_code="50422598" content_hash_code="129827780"/>
- <item id="dom_distiller" hash_code="3989826" content_hash_code="106153970"/>
- <item id="domain_reliability_report_upload" hash_code="108804096" content_hash_code="35902036"/>
- <item id="domain_security_policy" hash_code="77597059" content_hash_code="30916983"/>
+ <item id="CRD_ice_config_request" hash_code="49825319" content_hash_code="8740825" os_list="linux,windows"/>
+ <item id="CRD_relay_session_request" hash_code="24058523" content_hash_code="36997811" os_list="linux,windows"/>
+ <item id="CRD_telemetry_log" hash_code="18670926" content_hash_code="49025478" os_list="linux,windows"/>
+ <item id="affiliation_lookup" hash_code="111904019" content_hash_code="81061452" os_list="linux,windows"/>
+ <item id="appcache_update_job" hash_code="25790702" content_hash_code="27424887" os_list="linux,windows"/>
+ <item id="asset_links" hash_code="89771989" content_hash_code="72216357" os_list="linux,windows"/>
+ <item id="autofill_query" hash_code="88863520" content_hash_code="15563339" os_list="linux,windows"/>
+ <item id="autofill_upload" hash_code="104798869" content_hash_code="110634763" os_list="linux,windows"/>
+ <item id="background_fetch_context" hash_code="16469669" content_hash_code="52235434" os_list="linux,windows"/>
+ <item id="background_performance_tracer" hash_code="84575287" content_hash_code="120154250" os_list="linux,windows"/>
+ <item id="blink_extension_resource_loader" hash_code="84165821" content_hash_code="3695143" os_list="linux,windows"/>
+ <item id="blink_resource_loader" hash_code="101845102" content_hash_code="69137084" os_list="linux,windows"/>
+ <item id="blob_read" hash_code="112303907" content_hash_code="135449692" os_list="linux,windows"/>
+ <item id="blob_reader" hash_code="5154306" content_hash_code="39702178" os_list="linux,windows"/>
+ <item id="brandcode_config" hash_code="109679553" content_hash_code="128843792" os_list="linux,windows"/>
+ <item id="captive_portal_service" hash_code="88754904" content_hash_code="70737580" os_list="linux,windows"/>
+ <item id="certificate_verifier" hash_code="113553577" content_hash_code="62346354" os_list="linux,windows"/>
+ <item id="chrome_cleaner" hash_code="27071967" content_hash_code="111240292" os_list="windows"/>
+ <item id="chrome_expect_ct_reporter" hash_code="57276415" content_hash_code="137551346" os_list="linux,windows"/>
+ <item id="chrome_feedback_report_app" hash_code="134729048" content_hash_code="73916972" os_list="linux,windows"/>
+ <item id="chrome_variations_service" hash_code="115188287" content_hash_code="32485683" os_list="linux,windows"/>
+ <item id="client_download_request" hash_code="125522256" content_hash_code="23897505" os_list="linux,windows"/>
+ <item id="cloud_print_backend" hash_code="71578042" content_hash_code="89544098" os_list="linux,windows"/>
+ <item id="cloud_print_privet_register" hash_code="24978481" content_hash_code="131359002" os_list="linux,windows"/>
+ <item id="cloud_print_proxy" hash_code="50859288" content_hash_code="25432720" os_list="linux,windows"/>
+ <item id="cloud_print_search" hash_code="132055347" content_hash_code="123783474" os_list="linux,windows"/>
+ <item id="component_updater_utils" hash_code="125596241" content_hash_code="12311195" os_list="linux,windows"/>
+ <item id="content_hash_verification_job" hash_code="64733114" content_hash_code="127912411" os_list="linux,windows"/>
+ <item id="content_resource_fetcher" hash_code="70796791" deprecated="2017-09-16" content_hash_code="135648626"/>
+ <item id="content_suggestion_get_favicon" hash_code="16653985" content_hash_code="134280933" os_list="linux,windows"/>
+ <item id="credenential_avatar" hash_code="53695122" content_hash_code="113035371" os_list="linux,windows"/>
+ <item id="cros_recovery_image_download" hash_code="101725581" content_hash_code="23088027" os_list="linux,windows"/>
+ <item id="cryptauth_device_sync_tickle" hash_code="96565489" content_hash_code="111962899" os_list="linux,windows"/>
+ <item id="cryptauth_enrollment_flow_finish" hash_code="54836939" content_hash_code="122011288" os_list="linux,windows"/>
+ <item id="cryptauth_enrollment_flow_setup" hash_code="84889397" content_hash_code="115692637" os_list="linux,windows"/>
+ <item id="cryptauth_find_eligible_for_promotion" hash_code="20053290" content_hash_code="98613425" os_list="linux,windows"/>
+ <item id="cryptauth_find_eligible_unlock_devices" hash_code="120000562" content_hash_code="124093652" os_list="linux,windows"/>
+ <item id="cryptauth_get_my_devices" hash_code="136498680" content_hash_code="83420739" os_list="linux,windows"/>
+ <item id="cryptauth_toggle_easyunlock" hash_code="25204343" content_hash_code="5092035" os_list="linux,windows"/>
+ <item id="data_reduction_proxy_config" hash_code="485305" content_hash_code="134075813" os_list="linux,windows"/>
+ <item id="data_reduction_proxy_pingback" hash_code="68561428" content_hash_code="78407792" os_list="linux,windows"/>
+ <item id="data_reduction_proxy_secure_proxy_check" hash_code="131236802" content_hash_code="122297136" os_list="linux,windows"/>
+ <item id="data_reduction_proxy_warmup" hash_code="8250451" content_hash_code="6321249" os_list="linux,windows"/>
+ <item id="desktop_ios_promotion" hash_code="13694792" content_hash_code="19776951" os_list="windows"/>
+ <item id="device_geolocation_request" hash_code="77673751" content_hash_code="97181773" os_list="linux,windows"/>
+ <item id="device_management_service" hash_code="117782019" content_hash_code="127535409" os_list="linux,windows"/>
+ <item id="devtools_free_data_source" hash_code="22774132" content_hash_code="136324050" os_list="linux,windows"/>
+ <item id="devtools_handle_front_end_messages" hash_code="135636011" content_hash_code="18422190" os_list="linux,windows"/>
+ <item id="devtools_hard_coded_data_source" hash_code="111565057" content_hash_code="46074423" os_list="linux,windows"/>
+ <item id="devtools_interceptor" hash_code="98123737" content_hash_code="19053470" os_list="linux,windows"/>
+ <item id="devtools_network_resource" hash_code="129652775" content_hash_code="24059212" os_list="linux,windows"/>
+ <item id="dial_get_device_description" hash_code="50422598" content_hash_code="129827780" os_list="linux,windows"/>
+ <item id="dom_distiller" hash_code="3989826" content_hash_code="106153970" os_list="linux,windows"/>
+ <item id="domain_reliability_report_upload" hash_code="108804096" content_hash_code="35902036" os_list="linux,windows"/>
+ <item id="domain_security_policy" hash_code="77597059" content_hash_code="30916983" os_list="linux,windows"/>
<item id="doodle_fetcher" hash_code="97199008" deprecated="2017-08-28" content_hash_code="87981692"/>
<item id="doodle_service" hash_code="41154842" deprecated="2017-08-28" content_hash_code="28273962"/>
- <item id="download_manager_resume" hash_code="35380758" content_hash_code="41227674"/>
- <item id="download_web_contents_frame" hash_code="56351037" content_hash_code="3657889"/>
- <item id="downloads_api_run_async" hash_code="121068967" content_hash_code="87443585"/>
- <item id="drag_download_file" hash_code="95910019" content_hash_code="126492858"/>
- <item id="extension_blacklist" hash_code="59592717" content_hash_code="116742516"/>
- <item id="extension_crx_fetcher" hash_code="21145003" content_hash_code="79150319"/>
- <item id="extension_install_signer" hash_code="50464499" content_hash_code="88088656"/>
- <item id="extension_manifest_fetcher" hash_code="5151071" content_hash_code="57885402"/>
- <item id="external_policy_fetcher" hash_code="9459438" content_hash_code="64260484"/>
- <item id="family_info" hash_code="30913825" content_hash_code="25369370"/>
- <item id="gaia_auth_check_connection_info" hash_code="4598626" content_hash_code="57347000"/>
- <item id="gaia_auth_exchange_cookies" hash_code="134289752" content_hash_code="66433230"/>
- <item id="gaia_auth_exchange_device_id" hash_code="39877119" content_hash_code="61857947"/>
- <item id="gaia_auth_fetch_for_uber" hash_code="97978464" content_hash_code="28006265"/>
- <item id="gaia_auth_get_user_info" hash_code="82167736" content_hash_code="4695017"/>
- <item id="gaia_auth_list_accounts" hash_code="35565745" content_hash_code="93669150"/>
- <item id="gaia_auth_log_out" hash_code="116426676" content_hash_code="91154233"/>
- <item id="gaia_auth_login" hash_code="91597383" content_hash_code="111911548"/>
- <item id="gaia_auth_merge_sessions" hash_code="26216847" content_hash_code="30423843"/>
- <item id="gaia_auth_revoke_token" hash_code="133982351" content_hash_code="96665330"/>
- <item id="gaia_cookie_manager_external_cc_result" hash_code="4300475" content_hash_code="31188375"/>
- <item id="gaia_oauth_client_get_token_info" hash_code="32585152" content_hash_code="128143346"/>
- <item id="gaia_oauth_client_get_tokens" hash_code="5637379" content_hash_code="12099176"/>
- <item id="gaia_oauth_client_get_user_info" hash_code="83476155" content_hash_code="35159007"/>
- <item id="gaia_oauth_client_refresh_token" hash_code="82462683" content_hash_code="22305252"/>
- <item id="gcm_channel_status_request" hash_code="18300705" content_hash_code="53862393"/>
- <item id="gcm_checkin" hash_code="65957842" content_hash_code="98259579"/>
- <item id="gcm_registration" hash_code="61656965" content_hash_code="113670632"/>
- <item id="gcm_subscription" hash_code="56434025" content_hash_code="61632174"/>
- <item id="gcm_unregistration" hash_code="119542033" content_hash_code="30144127"/>
- <item id="google_url_tracker" hash_code="5492492" content_hash_code="54474899"/>
- <item id="headless_url_request" hash_code="29865866" content_hash_code="76700151"/>
- <item id="history_notice_utils_notice" hash_code="102595701" content_hash_code="4717759"/>
- <item id="history_notice_utils_popup" hash_code="80832574" content_hash_code="11746153"/>
- <item id="icon_cacher" hash_code="103133150" content_hash_code="116368348"/>
- <item id="icon_catcher_get_large_icon" hash_code="44494884" content_hash_code="98262037"/>
- <item id="indexed_db_internals_handler" hash_code="131180348" content_hash_code="59026406"/>
- <item id="intranet_redirect_detector" hash_code="21785164" content_hash_code="62025595"/>
- <item id="invalidation_service" hash_code="72354423" content_hash_code="78425687"/>
- <item id="lib_address_input" hash_code="50816767" content_hash_code="57977576"/>
- <item id="logo_tracker" hash_code="36859107" content_hash_code="67588075"/>
- <item id="metrics_report_ukm" hash_code="727478" content_hash_code="40919254"/>
- <item id="metrics_report_uma" hash_code="727528" content_hash_code="10176197"/>
- <item id="missing" hash_code="77012883" reserved="1"/>
- <item id="navigation_url_loader" hash_code="63171670" content_hash_code="129352907"/>
- <item id="network_time_component" hash_code="46188932" content_hash_code="28051857"/>
- <item id="notification_image_reporter" hash_code="70126372" content_hash_code="29754543"/>
- <item id="ntp_contextual_suggestions_fetch" hash_code="95711309" content_hash_code="29742597"/>
- <item id="ntp_snippets_fetch" hash_code="15418154" content_hash_code="10078959"/>
- <item id="oauth2_access_token_fetcher" hash_code="27915688" content_hash_code="33501872"/>
- <item id="oauth2_mint_token_flow" hash_code="1112842" content_hash_code="70101159"/>
- <item id="ocsp_start_url_request" hash_code="60921996" content_hash_code="24127780"/>
- <item id="offline_prefetch" hash_code="19185953" content_hash_code="57248156"/>
- <item id="omnibox_navigation_observer" hash_code="61684939" content_hash_code="70941231"/>
- <item id="omnibox_prefetch_image" hash_code="109200878" content_hash_code="107906693"/>
- <item id="omnibox_result_change" hash_code="73107389" content_hash_code="24802647"/>
- <item id="omnibox_suggest" hash_code="47815025" content_hash_code="86297726"/>
- <item id="omnibox_suggest_deletion" hash_code="84212388" content_hash_code="24981550"/>
- <item id="omnibox_zerosuggest" hash_code="7687691" content_hash_code="119419625"/>
- <item id="omnibox_zerosuggest_experimental" hash_code="3813491" content_hash_code="22929259"/>
- <item id="one_google_bar_service" hash_code="78917933" content_hash_code="46527252"/>
- <item id="open_search" hash_code="107267424" content_hash_code="25715812"/>
- <item id="parallel_download_job" hash_code="135118587" content_hash_code="105330419"/>
- <item id="password_protection_request" hash_code="66322287" content_hash_code="21270837"/>
- <item id="payment_instrument_icon_fetcher" hash_code="73309970" content_hash_code="84709873"/>
- <item id="payment_manifest_downloader" hash_code="84045030" content_hash_code="19293316"/>
- <item id="payments_sync_cards" hash_code="95588446" content_hash_code="56526513"/>
- <item id="permission_reporting" hash_code="131741641" content_hash_code="7213535"/>
- <item id="permission_request_creator" hash_code="43206794" content_hash_code="73571699"/>
- <item id="persist_blob_to_indexed_db" hash_code="32030464" content_hash_code="35410079"/>
- <item id="plugins_resource_service" hash_code="49601082" content_hash_code="6877335"/>
- <item id="popular_sites_fetch" hash_code="50755044" content_hash_code="6910083"/>
- <item id="ppapi_download_request" hash_code="135967426" content_hash_code="110461402"/>
- <item id="prefetch_download" hash_code="44583172" content_hash_code="100587691"/>
- <item id="printer_job_handler" hash_code="67638271" content_hash_code="52577454"/>
- <item id="privet_http_impl" hash_code="71251498" content_hash_code="107348604"/>
- <item id="profile_avatar" hash_code="51164680" content_hash_code="113550845"/>
- <item id="profile_resetter_upload" hash_code="105330607" content_hash_code="129329171"/>
- <item id="proxy_script_fetcher" hash_code="37531401" content_hash_code="31866133"/>
- <item id="ranker_url_fetcher" hash_code="95682324" content_hash_code="45958626"/>
- <item id="rappor_report" hash_code="44606780" content_hash_code="111287826"/>
- <item id="refresh_token_annotation_request" hash_code="7433837" content_hash_code="59226150"/>
- <item id="remote_suggestions_provider" hash_code="49544361" content_hash_code="126329742"/>
- <item id="render_view_context_menu" hash_code="25844439" content_hash_code="69471170"/>
- <item id="renderer_initiated_download" hash_code="116443055" content_hash_code="37846436"/>
- <item id="reporting" hash_code="109891200" content_hash_code="125758928"/>
- <item id="resource_dispatcher_host" hash_code="81157007" content_hash_code="35725167"/>
+ <item id="download_manager_resume" hash_code="35380758" content_hash_code="41227674" os_list="linux,windows"/>
+ <item id="download_web_contents_frame" hash_code="56351037" content_hash_code="3657889" os_list="linux,windows"/>
+ <item id="downloads_api_run_async" hash_code="121068967" content_hash_code="87443585" os_list="linux,windows"/>
+ <item id="drag_download_file" hash_code="95910019" content_hash_code="126492858" os_list="linux,windows"/>
+ <item id="extension_blacklist" hash_code="59592717" content_hash_code="116742516" os_list="linux,windows"/>
+ <item id="extension_crx_fetcher" hash_code="21145003" content_hash_code="79150319" os_list="linux,windows"/>
+ <item id="extension_install_signer" hash_code="50464499" content_hash_code="88088656" os_list="linux,windows"/>
+ <item id="extension_manifest_fetcher" hash_code="5151071" content_hash_code="57885402" os_list="linux,windows"/>
+ <item id="external_policy_fetcher" hash_code="9459438" content_hash_code="64260484" os_list="linux,windows"/>
+ <item id="family_info" hash_code="30913825" content_hash_code="25369370" os_list="linux,windows"/>
+ <item id="favicon_loader" hash_code="112189210" content_hash_code="70773116" os_list="linux,windows"/>
+ <item id="gaia_auth_check_connection_info" hash_code="4598626" content_hash_code="57347000" os_list="linux,windows"/>
+ <item id="gaia_auth_exchange_cookies" hash_code="134289752" content_hash_code="66433230" os_list="linux,windows"/>
+ <item id="gaia_auth_exchange_device_id" hash_code="39877119" content_hash_code="61857947" os_list="linux,windows"/>
+ <item id="gaia_auth_fetch_for_uber" hash_code="97978464" content_hash_code="28006265" os_list="linux,windows"/>
+ <item id="gaia_auth_get_user_info" hash_code="82167736" content_hash_code="4695017" os_list="linux,windows"/>
+ <item id="gaia_auth_list_accounts" hash_code="35565745" content_hash_code="93669150" os_list="linux,windows"/>
+ <item id="gaia_auth_log_out" hash_code="116426676" content_hash_code="91154233" os_list="linux,windows"/>
+ <item id="gaia_auth_login" hash_code="91597383" content_hash_code="111911548" os_list="linux,windows"/>
+ <item id="gaia_auth_merge_sessions" hash_code="26216847" content_hash_code="30423843" os_list="linux,windows"/>
+ <item id="gaia_auth_revoke_token" hash_code="133982351" content_hash_code="96665330" os_list="linux,windows"/>
+ <item id="gaia_cookie_manager_external_cc_result" hash_code="4300475" content_hash_code="31188375" os_list="linux,windows"/>
+ <item id="gaia_oauth_client_get_token_info" hash_code="32585152" content_hash_code="128143346" os_list="linux,windows"/>
+ <item id="gaia_oauth_client_get_tokens" hash_code="5637379" content_hash_code="12099176" os_list="linux,windows"/>
+ <item id="gaia_oauth_client_get_user_info" hash_code="83476155" content_hash_code="35159007" os_list="linux,windows"/>
+ <item id="gaia_oauth_client_refresh_token" hash_code="82462683" content_hash_code="22305252" os_list="linux,windows"/>
+ <item id="gcm_channel_status_request" hash_code="18300705" content_hash_code="53862393" os_list="linux,windows"/>
+ <item id="gcm_checkin" hash_code="65957842" content_hash_code="98259579" os_list="linux,windows"/>
+ <item id="gcm_registration" hash_code="61656965" content_hash_code="113670632" os_list="linux,windows"/>
+ <item id="gcm_subscription" hash_code="56434025" content_hash_code="61632174" os_list="linux,windows"/>
+ <item id="gcm_unregistration" hash_code="119542033" content_hash_code="30144127" os_list="linux,windows"/>
+ <item id="google_url_tracker" hash_code="5492492" content_hash_code="54474899" os_list="linux,windows"/>
+ <item id="headless_url_request" hash_code="29865866" content_hash_code="76700151" os_list="linux,windows"/>
+ <item id="history_notice_utils_notice" hash_code="102595701" content_hash_code="4717759" os_list="linux,windows"/>
+ <item id="history_notice_utils_popup" hash_code="80832574" content_hash_code="11746153" os_list="linux,windows"/>
+ <item id="icon_cacher" hash_code="103133150" content_hash_code="116368348" os_list="linux,windows"/>
+ <item id="icon_catcher_get_large_icon" hash_code="44494884" content_hash_code="98262037" os_list="linux,windows"/>
+ <item id="indexed_db_internals_handler" hash_code="131180348" content_hash_code="59026406" os_list="linux,windows"/>
+ <item id="intranet_redirect_detector" hash_code="21785164" content_hash_code="62025595" os_list="linux,windows"/>
+ <item id="invalidation_service" hash_code="72354423" content_hash_code="78425687" os_list="linux,windows"/>
+ <item id="lib_address_input" hash_code="50816767" content_hash_code="57977576" os_list="linux,windows"/>
+ <item id="logo_tracker" hash_code="36859107" content_hash_code="67588075" os_list="linux,windows"/>
+ <item id="metrics_report_ukm" hash_code="727478" content_hash_code="40919254" os_list="linux,windows"/>
+ <item id="metrics_report_uma" hash_code="727528" content_hash_code="10176197" os_list="linux,windows"/>
+ <item id="missing" hash_code="77012883" reserved="1" os_list="all"/>
+ <item id="mojo_context_state" hash_code="93232258" content_hash_code="124821232" os_list="linux,windows"/>
+ <item id="navigation_url_loader" hash_code="63171670" content_hash_code="129352907" os_list="linux,windows"/>
+ <item id="net_error_helper" hash_code="60071001" content_hash_code="68322861" os_list="linux,windows"/>
+ <item id="network_time_component" hash_code="46188932" content_hash_code="28051857" os_list="linux,windows"/>
+ <item id="notification_image_reporter" hash_code="70126372" content_hash_code="29754543" os_list="linux,windows"/>
+ <item id="ntp_contextual_suggestions_fetch" hash_code="95711309" content_hash_code="29742597" os_list="linux,windows"/>
+ <item id="ntp_snippets_fetch" hash_code="15418154" content_hash_code="10078959" os_list="linux,windows"/>
+ <item id="oauth2_access_token_fetcher" hash_code="27915688" content_hash_code="33501872" os_list="linux,windows"/>
+ <item id="oauth2_mint_token_flow" hash_code="1112842" content_hash_code="70101159" os_list="linux,windows"/>
+ <item id="ocsp_start_url_request" hash_code="60921996" content_hash_code="24127780" os_list="linux"/>
+ <item id="offline_prefetch" hash_code="19185953" content_hash_code="57248156" os_list="linux,windows"/>
+ <item id="omnibox_navigation_observer" hash_code="61684939" content_hash_code="70941231" os_list="linux,windows"/>
+ <item id="omnibox_prefetch_image" hash_code="109200878" content_hash_code="107906693" os_list="linux,windows"/>
+ <item id="omnibox_result_change" hash_code="73107389" content_hash_code="24802647" os_list="linux,windows"/>
+ <item id="omnibox_suggest" hash_code="47815025" content_hash_code="86297726" os_list="linux,windows"/>
+ <item id="omnibox_suggest_deletion" hash_code="84212388" content_hash_code="24981550" os_list="linux,windows"/>
+ <item id="omnibox_zerosuggest" hash_code="7687691" content_hash_code="119419625" os_list="linux,windows"/>
+ <item id="omnibox_zerosuggest_experimental" hash_code="3813491" content_hash_code="22929259" os_list="linux,windows"/>
+ <item id="one_google_bar_service" hash_code="78917933" content_hash_code="46527252" os_list="linux,windows"/>
+ <item id="open_search" hash_code="107267424" content_hash_code="25715812" os_list="linux,windows"/>
+ <item id="parallel_download_job" hash_code="135118587" content_hash_code="105330419" os_list="linux,windows"/>
+ <item id="password_protection_request" hash_code="66322287" content_hash_code="21270837" os_list="linux,windows"/>
+ <item id="payment_instrument_icon_fetcher" hash_code="73309970" deprecated="2017-09-16" content_hash_code="84709873"/>
+ <item id="payment_manifest_downloader" hash_code="84045030" content_hash_code="19293316" os_list="linux,windows"/>
+ <item id="payments_sync_cards" hash_code="95588446" content_hash_code="56526513" os_list="linux,windows"/>
+ <item id="pdf_plugin_placeholder" hash_code="56866367" content_hash_code="16907221" os_list="linux,windows"/>
+ <item id="permission_reporting" hash_code="131741641" content_hash_code="7213535" os_list="linux,windows"/>
+ <item id="permission_request_creator" hash_code="43206794" content_hash_code="73571699" os_list="linux,windows"/>
+ <item id="persist_blob_to_indexed_db" hash_code="32030464" content_hash_code="35410079" os_list="linux,windows"/>
+ <item id="plugins_resource_service" hash_code="49601082" content_hash_code="6877335" os_list="linux,windows"/>
+ <item id="popular_sites_fetch" hash_code="50755044" content_hash_code="6910083" os_list="linux,windows"/>
+ <item id="ppapi_download_request" hash_code="135967426" content_hash_code="110461402" os_list="linux,windows"/>
+ <item id="prefetch_download" hash_code="44583172" content_hash_code="100587691" os_list="linux,windows"/>
+ <item id="printer_job_handler" hash_code="67638271" content_hash_code="52577454" os_list="linux,windows"/>
+ <item id="privet_http_impl" hash_code="71251498" content_hash_code="107348604" os_list="linux,windows"/>
+ <item id="profile_avatar" hash_code="51164680" content_hash_code="113550845" os_list="linux,windows"/>
+ <item id="profile_resetter_upload" hash_code="105330607" content_hash_code="129329171" os_list="linux,windows"/>
+ <item id="proxy_script_fetcher" hash_code="37531401" content_hash_code="31866133" os_list="linux,windows"/>
+ <item id="ranker_url_fetcher" hash_code="95682324" content_hash_code="45958626" os_list="linux,windows"/>
+ <item id="rappor_report" hash_code="44606780" content_hash_code="111287826" os_list="linux,windows"/>
+ <item id="refresh_token_annotation_request" hash_code="7433837" content_hash_code="59226150" os_list="linux,windows"/>
+ <item id="remote_suggestions_provider" hash_code="49544361" content_hash_code="126329742" os_list="linux,windows"/>
+ <item id="render_view_context_menu" hash_code="25844439" content_hash_code="69471170" os_list="linux,windows"/>
+ <item id="renderer_initiated_download" hash_code="116443055" content_hash_code="37846436" os_list="linux,windows"/>
+ <item id="reporting" hash_code="109891200" content_hash_code="125758928" os_list="linux,windows"/>
+ <item id="resource_dispatcher_host" hash_code="81157007" content_hash_code="35725167" os_list="linux,windows"/>
<item id="resource_dispather_host" hash_code="58963098" deprecated="2017-08-23" content_hash_code="72581415"/>
- <item id="resource_prefetch" hash_code="110815970" content_hash_code="39251261"/>
- <item id="safe_browsing_backup_request" hash_code="106980485" content_hash_code="101760679"/>
- <item id="safe_browsing_cache_collector" hash_code="115907811" content_hash_code="36392362"/>
- <item id="safe_browsing_certificate_error_reporting" hash_code="66590631" content_hash_code="50197576"/>
- <item id="safe_browsing_chunk_backup_request" hash_code="79957943" content_hash_code="133850277"/>
- <item id="safe_browsing_client_side_malware_detector" hash_code="102935425" content_hash_code="79591279"/>
- <item id="safe_browsing_client_side_phishing_detector" hash_code="1313982" content_hash_code="50199143"/>
- <item id="safe_browsing_extended_reporting" hash_code="42848942" content_hash_code="50089173"/>
- <item id="safe_browsing_feedback" hash_code="44583821" content_hash_code="114076664"/>
- <item id="safe_browsing_g4_update" hash_code="75153841" content_hash_code="112049516"/>
- <item id="safe_browsing_get_full_hash" hash_code="68745894" content_hash_code="21739198"/>
- <item id="safe_browsing_incident" hash_code="124950347" content_hash_code="58481082"/>
- <item id="safe_browsing_module_loader" hash_code="6019475" content_hash_code="49511650"/>
- <item id="safe_browsing_v4_get_hash" hash_code="8561691" content_hash_code="132435617"/>
- <item id="safe_search_url_reporter" hash_code="119677115" content_hash_code="67393078"/>
- <item id="save_file_manager" hash_code="56275203" content_hash_code="56692339"/>
- <item id="sdch_dictionary_fetch" hash_code="47152935" content_hash_code="16764294"/>
- <item id="service_worker_write_to_cache_job" hash_code="117963307" content_hash_code="18065724"/>
- <item id="signed_in_profile_avatar" hash_code="108903331" content_hash_code="72850619"/>
- <item id="speech_recognition_downstream" hash_code="26096088" content_hash_code="120733233"/>
- <item id="speech_recognition_upstream" hash_code="66846958" content_hash_code="7706219"/>
- <item id="spellcheck_hunspell_dictionary" hash_code="117649486" content_hash_code="45660952"/>
- <item id="spellcheck_lookup" hash_code="132553989" content_hash_code="27978613"/>
- <item id="ssl_name_mismatch_lookup" hash_code="114468207" content_hash_code="97619078"/>
- <item id="suggestions_image_manager" hash_code="13211343" content_hash_code="36271280"/>
- <item id="suggestions_service" hash_code="35370363" content_hash_code="66296423"/>
- <item id="supervised_user_refresh_token_fetcher" hash_code="136117054" content_hash_code="101636136"/>
- <item id="supervised_user_url_filter" hash_code="14257952" content_hash_code="30470003"/>
- <item id="supervised_users_blacklist" hash_code="78544924" content_hash_code="10924669"/>
- <item id="sync_attachment_downloader" hash_code="26372521" content_hash_code="70097603"/>
- <item id="sync_attachment_uploader" hash_code="132657055" content_hash_code="25152853"/>
- <item id="sync_file_system" hash_code="102819690" content_hash_code="52153962"/>
- <item id="sync_http_bridge" hash_code="57144960" content_hash_code="32868346"/>
- <item id="sync_stop_reporter" hash_code="5021348" content_hash_code="56902850"/>
- <item id="test" hash_code="3556498" reserved="1"/>
- <item id="test_partial" hash_code="22096011" reserved="1"/>
- <item id="thumbnail_source" hash_code="135251783" content_hash_code="31086298"/>
- <item id="translate_url_fetcher" hash_code="137116619" content_hash_code="1127120"/>
- <item id="undefined" hash_code="45578882" reserved="1"/>
- <item id="url_fetcher_downloader" hash_code="113231892" content_hash_code="61085066"/>
- <item id="url_prevision_fetcher" hash_code="118389509" content_hash_code="66145513"/>
- <item id="user_info_fetcher" hash_code="22265491" content_hash_code="72016232"/>
- <item id="web_history_counter" hash_code="137457845" content_hash_code="16965644"/>
- <item id="web_history_expire" hash_code="60946824" content_hash_code="137378962"/>
- <item id="web_history_expire_between_dates" hash_code="126122632" content_hash_code="78470619"/>
- <item id="web_history_query" hash_code="17400350" content_hash_code="126490106"/>
- <item id="webrtc_log_upload" hash_code="62443804" content_hash_code="33661169"/>
- <item id="websocket_stream" hash_code="17188928" content_hash_code="7250776"/>
- <item id="webstore_data_fetcher" hash_code="26302604" content_hash_code="24000746"/>
- <item id="webstore_install_helper" hash_code="25921771" content_hash_code="10206361"/>
- <item id="webstore_installer" hash_code="18764319" content_hash_code="11030110"/>
- <item id="webui_content_scripts_download" hash_code="100545943" content_hash_code="119898059"/>
+ <item id="resource_prefetch" hash_code="110815970" content_hash_code="39251261" os_list="linux,windows"/>
+ <item id="rlz_ping" hash_code="99279418" content_hash_code="102108802" os_list="windows"/>
+ <item id="safe_browsing_backup_request" hash_code="106980485" content_hash_code="101760679" os_list="linux,windows"/>
+ <item id="safe_browsing_cache_collector" hash_code="115907811" content_hash_code="36392362" os_list="linux,windows"/>
+ <item id="safe_browsing_certificate_error_reporting" hash_code="66590631" content_hash_code="50197576" os_list="linux,windows"/>
+ <item id="safe_browsing_chunk_backup_request" hash_code="79957943" content_hash_code="133850277" os_list="linux,windows"/>
+ <item id="safe_browsing_client_side_malware_detector" hash_code="102935425" content_hash_code="79591279" os_list="linux,windows"/>
+ <item id="safe_browsing_client_side_phishing_detector" hash_code="1313982" content_hash_code="50199143" os_list="linux,windows"/>
+ <item id="safe_browsing_extended_reporting" hash_code="42848942" content_hash_code="50089173" os_list="linux,windows"/>
+ <item id="safe_browsing_feedback" hash_code="44583821" content_hash_code="114076664" os_list="linux,windows"/>
+ <item id="safe_browsing_g4_update" hash_code="75153841" content_hash_code="112049516" os_list="linux,windows"/>
+ <item id="safe_browsing_get_full_hash" hash_code="68745894" content_hash_code="21739198" os_list="linux,windows"/>
+ <item id="safe_browsing_incident" hash_code="124950347" content_hash_code="58481082" os_list="linux,windows"/>
+ <item id="safe_browsing_module_loader" hash_code="6019475" content_hash_code="49511650" os_list="linux,windows"/>
+ <item id="safe_browsing_v4_get_hash" hash_code="8561691" content_hash_code="132435617" os_list="linux,windows"/>
+ <item id="safe_search_url_reporter" hash_code="119677115" content_hash_code="67393078" os_list="linux,windows"/>
+ <item id="save_file_manager" hash_code="56275203" content_hash_code="56692339" os_list="linux,windows"/>
+ <item id="sdch_dictionary_fetch" hash_code="47152935" deprecated="2017-09-16" content_hash_code="16764294"/>
+ <item id="service_worker_navigation_preload" hash_code="129872904" content_hash_code="79473248" os_list="linux,windows"/>
+ <item id="service_worker_write_to_cache_job" hash_code="117963307" content_hash_code="18065724" os_list="linux,windows"/>
+ <item id="signed_in_profile_avatar" hash_code="108903331" content_hash_code="72850619" os_list="linux,windows"/>
+ <item id="speech_recognition_downstream" hash_code="26096088" content_hash_code="120733233" os_list="linux,windows"/>
+ <item id="speech_recognition_upstream" hash_code="66846958" content_hash_code="7706219" os_list="linux,windows"/>
+ <item id="spellcheck_hunspell_dictionary" hash_code="117649486" content_hash_code="45660952" os_list="linux,windows"/>
+ <item id="spellcheck_lookup" hash_code="132553989" content_hash_code="27978613" os_list="linux,windows"/>
+ <item id="ssl_name_mismatch_lookup" hash_code="114468207" content_hash_code="97619078" os_list="linux,windows"/>
+ <item id="suggestions_image_manager" hash_code="13211343" content_hash_code="36271280" os_list="linux,windows"/>
+ <item id="suggestions_service" hash_code="35370363" content_hash_code="66296423" os_list="linux,windows"/>
+ <item id="supervised_user_refresh_token_fetcher" hash_code="136117054" content_hash_code="101636136" os_list="linux,windows"/>
+ <item id="supervised_user_url_filter" hash_code="14257952" content_hash_code="30470003" os_list="linux,windows"/>
+ <item id="supervised_users_blacklist" hash_code="78544924" content_hash_code="10924669" os_list="linux,windows"/>
+ <item id="sync_attachment_downloader" hash_code="26372521" content_hash_code="70097603" os_list="linux,windows"/>
+ <item id="sync_attachment_uploader" hash_code="132657055" content_hash_code="25152853" os_list="linux,windows"/>
+ <item id="sync_file_system" hash_code="102819690" content_hash_code="52153962" os_list="linux,windows"/>
+ <item id="sync_http_bridge" hash_code="57144960" content_hash_code="32868346" os_list="linux,windows"/>
+ <item id="sync_stop_reporter" hash_code="5021348" content_hash_code="56902850" os_list="linux,windows"/>
+ <item id="test" hash_code="3556498" reserved="1" os_list="all"/>
+ <item id="test_partial" hash_code="22096011" reserved="1" os_list="all"/>
+ <item id="thumbnail_source" hash_code="135251783" content_hash_code="31086298" os_list="linux,windows"/>
+ <item id="translate_url_fetcher" hash_code="137116619" content_hash_code="1127120" os_list="linux,windows"/>
+ <item id="undefined" hash_code="45578882" reserved="1" os_list="all"/>
+ <item id="url_fetcher_downloader" hash_code="113231892" content_hash_code="61085066" os_list="linux,windows"/>
+ <item id="url_prevision_fetcher" hash_code="118389509" content_hash_code="66145513" os_list="linux,windows"/>
+ <item id="user_info_fetcher" hash_code="22265491" content_hash_code="72016232" os_list="linux,windows"/>
+ <item id="web_history_counter" hash_code="137457845" content_hash_code="16965644" os_list="linux,windows"/>
+ <item id="web_history_expire" hash_code="60946824" content_hash_code="137378962" os_list="linux,windows"/>
+ <item id="web_history_expire_between_dates" hash_code="126122632" content_hash_code="78470619" os_list="linux,windows"/>
+ <item id="web_history_query" hash_code="17400350" content_hash_code="126490106" os_list="linux,windows"/>
+ <item id="webrtc_log_upload" hash_code="62443804" content_hash_code="33661169" os_list="linux,windows"/>
+ <item id="websocket_stream" hash_code="17188928" content_hash_code="7250776" os_list="linux,windows"/>
+ <item id="webstore_data_fetcher" hash_code="26302604" content_hash_code="24000746" os_list="linux,windows"/>
+ <item id="webstore_install_helper" hash_code="25921771" content_hash_code="10206361" os_list="linux,windows"/>
+ <item id="webstore_installer" hash_code="18764319" content_hash_code="11030110" os_list="linux,windows"/>
+ <item id="webui_content_scripts_download" hash_code="100545943" content_hash_code="119898059" os_list="linux,windows"/>
</annotations>
diff --git a/chromium/tools/uberblame.py b/chromium/tools/uberblame.py
new file mode 100755
index 00000000000..282bb1f5d46
--- /dev/null
+++ b/chromium/tools/uberblame.py
@@ -0,0 +1,556 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import cgi
+import colorsys
+import difflib
+import random
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import textwrap
+import webbrowser
+
+
+class TokenContext(object):
+ """Metadata about a token.
+
+ Attributes:
+ row: Row index of the token in the data file.
+ column: Column index of the token in the data file.
+ token: The token string.
+ commit: Hash of the git commit that added this token.
+ """
+ def __init__(self, row, column, token, commit=None):
+ self.row = row
+ self.column = column
+ self.token = token
+ self.commit = commit
+
+
+class Commit(object):
+ """Commit data.
+
+ Attributes:
+ hash: The commit hash.
+ diff: The commit diff.
+ """
+ def __init__(self, hash, diff):
+ self.hash = hash
+ self.diff = diff
+
+
+def tokenize_data(data):
+ """Tokenizes |data|.
+
+ Args:
+ data: String to tokenize.
+
+ Returns:
+ A list of TokenContexts.
+ """
+ contexts = []
+ in_identifier = False
+ identifier_start = 0
+ identifier = ''
+ row = 0
+ column = 0
+ line_contexts = []
+
+ for c in data + '\n':
+ if c.isalnum() or c == '_':
+ if in_identifier:
+ identifier += c
+ else:
+ in_identifier = True
+ identifier_start = column
+ identifier = c
+ else:
+ if in_identifier:
+ line_contexts.append(
+ TokenContext(row, identifier_start, identifier))
+ in_identifier = False
+ if not c.isspace():
+ line_contexts.append(TokenContext(row, column, c))
+
+ if c == '\n':
+ row += 1
+ column = 0
+ contexts.append(line_contexts)
+ line_tokens = []
+ line_contexts = []
+ else:
+ column += 1
+ return contexts
+
+
+def compute_unified_diff(old_tokens, new_tokens):
+ """Computes the diff between |old_tokens| and |new_tokens|.
+
+ Args:
+ old_tokens: Token strings corresponding to the old data.
+ new_tokens: Token strings corresponding to the new data.
+
+ Returns:
+ The diff, in unified diff format.
+ """
+ return difflib.unified_diff(old_tokens, new_tokens, n=0, lineterm='')
+
+
+def parse_chunk_header_file_range(file_range):
+ """Parses a chunk header file range.
+
+ Diff chunk headers have the form:
+ @@ -<file-range> +<file-range> @@
+ File ranges have the form:
+ <start line number>,<number of lines changed>
+
+ Args:
+ file_range: A chunk header file range.
+
+ Returns:
+ A tuple (range_start, range_end). The endpoints are adjusted such that
+ iterating over [range_start, range_end) will give the changed indices.
+ """
+ if ',' in file_range:
+ file_range_parts = file_range.split(',')
+ start = int(file_range_parts[0])
+ amount = int(file_range_parts[1])
+ if amount == 0:
+ return (start, start)
+ return (start - 1, start + amount - 1)
+ else:
+ return (int(file_range) - 1, int(file_range))
+
+
+def compute_changed_token_indices(previous_tokens, current_tokens):
+ """Computes changed and added tokens.
+
+ Args:
+ previous_tokens: Tokens corresponding to the old file.
+ current_tokens: Tokens corresponding to the new file.
+
+ Returns:
+ A tuple (added_tokens, changed_tokens).
+ added_tokens: A list of indices into |current_tokens|.
+ changed_tokens: A map of indices into |current_tokens| to
+ indices into |previous_tokens|.
+ """
+ prev_file_chunk_end = 0
+ prev_patched_chunk_end = 0
+ added_tokens = []
+ changed_tokens = {}
+ for line in compute_unified_diff(previous_tokens, current_tokens):
+ if line.startswith("@@"):
+ parts = line.split(' ')
+ removed = parts[1].lstrip('-')
+ removed_start, removed_end = parse_chunk_header_file_range(removed)
+ added = parts[2].lstrip('+')
+ added_start, added_end = parse_chunk_header_file_range(added)
+ for i in range(added_start, added_end):
+ added_tokens.append(i)
+ for i in range(0, removed_start - prev_patched_chunk_end):
+ changed_tokens[prev_file_chunk_end + i] = prev_patched_chunk_end + i
+ prev_patched_chunk_end = removed_end
+ prev_file_chunk_end = added_end
+ for i in range(0, len(previous_tokens) - prev_patched_chunk_end):
+ changed_tokens[prev_file_chunk_end + i] = prev_patched_chunk_end + i
+ return added_tokens, changed_tokens
+
+
+def flatten_nested_list(l):
+ """Flattens a list and provides a mapping from elements in the list back
+ into the nested list.
+
+ Args:
+ l: A list of lists.
+
+ Returns:
+ A tuple (flattened, index_to_position):
+ flattened: The flattened list.
+ index_to_position: A list of pairs (r, c) such that
+ index_to_position[i] == (r, c); flattened[i] == l[r][c]
+ """
+ flattened = []
+ index_to_position = {}
+ r = 0
+ c = 0
+ for nested_list in l:
+ for element in nested_list:
+ index_to_position[len(flattened)] = (r, c)
+ flattened.append(element)
+ c += 1
+ r += 1
+ c = 0
+ return (flattened, index_to_position)
+
+
+def compute_changed_token_positions(previous_tokens, current_tokens):
+ """Computes changed and added token positions.
+
+ Args:
+ previous_tokens: A list of lists of token strings. Lines in the file
+ correspond to the nested lists.
+ current_tokens: A list of lists of token strings. Lines in the file
+ correspond to the nested lists.
+
+ Returns:
+ A tuple (added_token_positions, changed_token_positions):
+ added_token_positions: A list of pairs that index into |current_tokens|.
+ changed_token_positions: A map from pairs that index into
+ |current_tokens| to pairs that index into |previous_tokens|.
+ """
+ flat_previous_tokens, previous_index_to_position = flatten_nested_list(
+ previous_tokens)
+ flat_current_tokens, current_index_to_position = flatten_nested_list(
+ current_tokens)
+ added_indices, changed_indices = compute_changed_token_indices(
+ flat_previous_tokens, flat_current_tokens)
+ added_token_positions = [current_index_to_position[i] for i in added_indices]
+ changed_token_positions = {
+ current_index_to_position[current_i]:
+ previous_index_to_position[changed_indices[current_i]]
+ for current_i in changed_indices
+ }
+ return (added_token_positions, changed_token_positions)
+
+
+def parse_chunks_from_diff(diff):
+ """Returns a generator of chunk data from a diff.
+
+ Args:
+ diff: A list of strings, with each string being a line from a diff
+ in unified diff format.
+
+ Returns:
+ A generator of tuples (added_lines_start, added_lines_end,
+ removed_lines, removed_lines_start)
+ """
+ in_chunk = False
+ chunk_previous = []
+ previous_start = None
+ current_start = None
+ current_end = None
+ for line in diff:
+ if line.startswith('@@'):
+ if in_chunk:
+ yield (current_start, current_end,
+ chunk_previous, previous_start)
+ parts = line.split(' ')
+ previous = parts[1].lstrip('-')
+ previous_start, _ = parse_chunk_header_file_range(previous)
+ current = parts[2].lstrip('+')
+ current_start, current_end = parse_chunk_header_file_range(current)
+ in_chunk = True
+ chunk_previous = []
+ elif in_chunk and line.startswith('-'):
+ chunk_previous.append(line[1:])
+ if current_start != None:
+ yield (current_start, current_end,
+ chunk_previous, previous_start)
+
+
+def should_skip_commit(commit):
+ """Decides if |commit| should be skipped when computing the blame.
+
+ Commit 5d4451e deleted all files in the repo except for DEPS. The
+ next commit, 1e7896, brought them back. This is a hack to skip
+ those commits (except for the files they modified). If we did not
+ do this, changes would be incorrectly attributed to 1e7896.
+
+ Args:
+ commit: A Commit object.
+
+ Returns:
+ A boolean indicating if this commit should be skipped.
+ """
+ banned_commits = [
+ '1e78967ed2f1937b3809c19d91e7dd62d756d307',
+ '5d4451ebf298d9d71f716cc0135f465cec41fcd0',
+ ]
+ if commit.hash not in banned_commits:
+ return False
+ banned_commits_file_exceptions = [
+ 'DEPS',
+ 'chrome/browser/ui/views/file_manager_dialog_browsertest.cc',
+ ]
+ for line in commit.diff:
+ if line.startswith('---') or line.startswith('+++'):
+ if line.split(' ')[1] in banned_commits_file_exceptions:
+ return False
+ elif line.startswith('@@'):
+ return True
+ assert False
+
+
+def generate_substrings(file):
+ """Generates substrings from a file stream, where substrings are
+ separated by '\0'.
+
+ For example, the input:
+ 'a\0bc\0\0\0d\0'
+ would produce the output:
+ ['a', 'bc', 'd']
+
+ Args:
+ file: A readable file.
+ """
+ data = ''
+ while True:
+ ch = file.read(1)
+ if ch == '':
+ break
+ if ch == '\0':
+ if data != '':
+ yield data
+ data = ''
+ else:
+ data += ch
+ if data != '':
+ yield data
+
+
+def generate_commits(git_log_stdout):
+ """Parses git log output into a stream of Commit objects.
+ """
+ substring_generator = generate_substrings(git_log_stdout)
+ while True:
+ hash = substring_generator.next().strip('\n')
+ diff = substring_generator.next().strip('\n').split('\n')
+ yield Commit(hash, diff)
+
+
+def uberblame_aux(file_name, git_log_stdout, data):
+ """Computes the uberblame of file |file_name|.
+
+ Args:
+ file_name: File to uberblame.
+ git_log_stdout: A file object that represents the git log output.
+ data: A string containing the data of file |file_name|.
+
+ Returns:
+ A tuple (data, blame).
+ data: File contents.
+ blame: A list of TokenContexts.
+ """
+ blame = tokenize_data(data)
+
+ blamed_tokens = 0
+ total_tokens = len(blame)
+ uber_blame = (data, blame[:])
+
+ for commit in generate_commits(git_log_stdout):
+ if should_skip_commit(commit):
+ continue
+
+ offset = 0
+ for (added_lines_start, added_lines_end, removed_lines,
+ removed_lines_start) in parse_chunks_from_diff(commit.diff):
+ added_lines_start += offset
+ added_lines_end += offset
+ previous_contexts = [token_lines
+ for line_previous in removed_lines
+ for token_lines in tokenize_data(line_previous)]
+ previous_tokens = [
+ [context.token for context in contexts]
+ for contexts in previous_contexts
+ ]
+ current_contexts = blame[added_lines_start:added_lines_end]
+ current_tokens = [
+ [context.token for context in contexts]
+ for contexts in current_contexts
+ ]
+ added_token_positions, changed_token_positions = (
+ compute_changed_token_positions(previous_tokens, current_tokens))
+ for r, c in added_token_positions:
+ current_contexts[r][c].commit = commit.hash
+ blamed_tokens += 1
+ for r, c in changed_token_positions:
+ pr, pc = changed_token_positions[(r, c)]
+ previous_contexts[pr][pc] = current_contexts[r][c]
+
+ assert added_lines_start <= added_lines_end <= len(blame)
+ current_blame_size = len(blame)
+ blame[added_lines_start:added_lines_end] = previous_contexts
+ offset += len(blame) - current_blame_size
+
+ assert blame == [] or blame == [[]]
+ return uber_blame
+
+
+def uberblame(file_name, revision):
+ """Computes the uberblame of file |file_name|.
+
+ Args:
+ file_name: File to uberblame.
+ revision: The revision to start the uberblame at.
+
+ Returns:
+ A tuple (data, blame).
+ data: File contents.
+ blame: A list of TokenContexts.
+ """
+ cmd_git_log = ['git', 'log', '--minimal', '--no-prefix', '--follow', '-m',
+ '--first-parent', '-p', '-U0', '-z', '--format=%x00%h',
+ revision, '--', file_name]
+ git_log = subprocess.Popen(cmd_git_log,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ data = subprocess.check_output(
+ ['git', 'show', '%s:%s' % (revision, file_name)])
+ data, blame = uberblame_aux(file_name, git_log.stdout, data)
+
+ _, stderr = git_log.communicate()
+ if git_log.returncode != 0:
+ raise subprocess.CalledProcessError(git_log.returncode, cmd_git_log, stderr)
+ return data, blame
+
+
+def generate_pastel_color():
+ (h, l, s) = (random.uniform(0, 1),
+ random.uniform(0.8, 0.9),
+ random.uniform(0.5, 1))
+ (r, g, b) = colorsys.hls_to_rgb(h, l, s)
+ return "#%0.2X%0.2X%0.2X" % (int(r*255), int(g*255), int(b*255))
+
+
+def visualize_uberblame(data, blame):
+ """Creates and displays a web page to visualize |blame|.
+
+ Args:
+ data: The data file as returned by uberblame().
+ blame: A list of TokenContexts as returned by uberblame().
+ """
+ # Use the same seed for the color generator on each run so that
+ # loading the same blame of the same file twice will result in the
+ # same generated HTML page.
+ random.seed(0x52937865ec62d1ea)
+ html = """\
+ <html>
+ <head>
+ <style>
+ body {
+ font-family: "Courier New";
+ }
+ pre {
+ display: inline;
+ }
+ a {
+ color: #000000;
+ text-decoration: none;
+ }
+ span {
+ outline: 1pt solid #00000030;
+ outline-offset: -1pt;
+ }
+ #linenums {
+ text-align: right;
+ }
+ </style>
+ </head>
+ <body>
+ <table>
+ <tbody>
+ <tr>
+ <td valign="top" id="linenums">
+ <pre>%s</pre>
+ </td>
+ <td valign="top">
+ <pre>%s</pre>
+ </td>
+ </tr>
+ </tbody>
+ </table>
+ </body>
+ </html>
+ """
+ html = textwrap.dedent(html)
+ lines = []
+ commit_colors = {}
+ blame_index = 0
+ blame = [context for contexts in blame for context in contexts]
+ row = 0
+ lastline = ''
+ for line in data.split('\n'):
+ lastline = line
+ column = 0
+ for c in line + '\n':
+ if blame_index < len(blame):
+ token_context = blame[blame_index]
+ if (row == token_context.row and
+ column == token_context.column + len(token_context.token)):
+ if (blame_index + 1 == len(blame) or
+ blame[blame_index].commit != blame[blame_index + 1].commit):
+ lines.append('</a></span>')
+ blame_index += 1
+ if blame_index < len(blame):
+ token_context = blame[blame_index]
+ if row == token_context.row and column == token_context.column:
+ if (blame_index == 0 or
+ blame[blame_index - 1].commit != blame[blame_index].commit):
+ commit = token_context.commit
+ assert commit != None
+ lines.append(('<a href="https://chromium.googlesource.com/' +
+ 'chromium/src/+/%s">') % commit)
+ if commit not in commit_colors:
+ commit_colors[commit] = generate_pastel_color()
+ color = commit_colors[commit]
+ lines.append('<span style="background-color: %s">' % color)
+ lines.append(cgi.escape(c))
+ column += 1
+ row += 1
+ line_nums = range(1, row if lastline.strip() == '' else row + 1)
+ line_nums = '\n'.join([str(num) for num in line_nums])
+ lines = ''.join(lines)
+ return html % (line_nums, lines)
+
+
+def show_visualization(html):
+ """Display |html| in a web browser.
+
+ Args:
+ html: The contents of the file to display, as a string.
+ """
+ # Keep the temporary file around so the browser has time to open it.
+ # TODO(thomasanderson): spin up a temporary web server to serve this
+ # file so we don't have to leak it.
+ html_file = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
+ html_file.write(html)
+ html_file.flush()
+ if sys.platform.startswith('linux'):
+ # Don't show any messages when starting the browser.
+ saved_stdout = os.dup(1)
+ saved_stderr = os.dup(2)
+ os.close(1)
+ os.close(2)
+ os.open(os.devnull, os.O_RDWR)
+ os.open(os.devnull, os.O_RDWR)
+ webbrowser.open('file://' + html_file.name)
+ if sys.platform.startswith('linux'):
+ os.dup2(saved_stdout, 1)
+ os.dup2(saved_stderr, 2)
+ os.close(saved_stdout)
+ os.close(saved_stderr)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description='Show what revision last modified each token of a file')
+ parser.add_argument('revision', default='HEAD', nargs='?',
+ help='Show only commits starting from a revision.')
+ parser.add_argument('file', help='The file to uberblame.')
+ args = parser.parse_args()
+
+ data, blame = uberblame(args.file, args.revision)
+ html = visualize_uberblame(data, blame)
+ show_visualization(html)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/chromium/tools/v8_context_snapshot/BUILD.gn b/chromium/tools/v8_context_snapshot/BUILD.gn
index e4d15f18307..9f2d3db2462 100644
--- a/chromium/tools/v8_context_snapshot/BUILD.gn
+++ b/chromium/tools/v8_context_snapshot/BUILD.gn
@@ -7,8 +7,9 @@
# third_party/WebKit/Source/bindings/core/v8/V8ContextSnapshot.{cpp|h}.
# to speedup creating a V8 context and setting up around it.
+import("//tools/v8_context_snapshot/v8_context_snapshot.gni")
+
import("//build/config/c++/c++.gni")
-import("//build/config/chromecast_build.gni")
import("//build/config/compiler/compiler.gni")
import("//v8/snapshot_toolchain.gni")
@@ -16,10 +17,6 @@ if (is_android) {
import("//build/config/android/rules.gni")
}
-declare_args() {
- use_v8_context_snapshot = !is_chromeos && !is_android && !is_chromecast
-}
-
if (is_android) {
android_assets("v8_context_snapshot_assets") {
deps = [
@@ -40,9 +37,15 @@ group("v8_context_snapshot") {
}
}
+config("use_v8_context_snapshot") {
+ if (use_v8_context_snapshot) {
+ defines = [ "USE_V8_CONTEXT_SNAPSHOT" ]
+ }
+}
+
if (use_v8_context_snapshot) {
action("generate_v8_context_snapshot") {
- script = "run.py"
+ script = "//build/gn_run_binary.py"
output_file = "$root_out_dir/v8_context_snapshot.bin"
output_path = rebase_path(output_file, root_build_dir)
@@ -71,10 +74,10 @@ if (use_v8_context_snapshot) {
# disable it while taking a V8 snapshot.
config("disable_icf") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
- if (use_gold || use_lld) {
+ if (is_win) {
+ ldflags = [ "/OPT:NOICF" ] # link.exe, but also lld-link.exe.
+ } else if (use_gold || use_lld) {
ldflags = [ "-Wl,--icf=none" ]
- } else if (is_win) {
- ldflags = [ "/OPT:NOICF" ]
}
}
diff --git a/chromium/tools/v8_context_snapshot/run.py b/chromium/tools/v8_context_snapshot/run.py
deleted file mode 100644
index a376b4c10e8..00000000000
--- a/chromium/tools/v8_context_snapshot/run.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This program wraps an arbitrary command since gn currently can only execute
-scripts."""
-
-import os
-import subprocess
-import sys
-
-args = sys.argv[1:]
-args[0] = os.path.abspath(args[0])
-
-sys.exit(subprocess.call(args))
diff --git a/chromium/tools/v8_context_snapshot/v8_context_snapshot.gni b/chromium/tools/v8_context_snapshot/v8_context_snapshot.gni
new file mode 100644
index 00000000000..e43ee810fec
--- /dev/null
+++ b/chromium/tools/v8_context_snapshot/v8_context_snapshot.gni
@@ -0,0 +1,18 @@
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Targets in ths file are to take a V8 context snapshot on build time.
+# Created V8 context snapshot is used in
+# third_party/WebKit/Source/bindings/core/v8/V8ContextSnapshot.{cpp|h}.
+# to speedup creating a V8 context and setting up around it.
+
+import("//build/config/chromecast_build.gni")
+import("//build/config/v8_target_cpu.gni")
+
+declare_args() {
+ # TODO(crbug.com/764576): Enable the feature on more environments.
+ use_v8_context_snapshot =
+ target_os != "chromeos" && target_os != "android" && !is_chromecast &&
+ (v8_target_cpu == target_cpu || is_msan)
+}
diff --git a/chromium/tools/valgrind/browser_wrapper_win.py b/chromium/tools/valgrind/browser_wrapper_win.py
deleted file mode 100644
index 0023ca7dfb7..00000000000
--- a/chromium/tools/valgrind/browser_wrapper_win.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import glob
-import os
-import re
-import sys
-import subprocess
-
-# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
-# support layout_tests, remove Dr. Memory specific code and verify it works
-# on a "clean" Mac.
-
-testcase_name = None
-for arg in sys.argv:
- m = re.match("\-\-gtest_filter=(.*)", arg)
- if m:
- assert testcase_name is None
- testcase_name = m.groups()[0]
-
-# arg #0 is the path to this python script
-cmd_to_run = sys.argv[1:]
-
-# TODO(timurrrr): this is Dr. Memory-specific
-# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
-# To group reports per UI test, we want to put the reports for each test into a
-# separate directory. This code can be simplified when we have
-# https://github.com/DynamoRIO/drmemory/issues/684 fixed.
-logdir_idx = cmd_to_run.index("-logdir")
-old_logdir = cmd_to_run[logdir_idx + 1]
-
-wrapper_pid = str(os.getpid())
-
-# On Windows, there is a chance of PID collision. We avoid it by appending the
-# number of entries in the logdir at the end of wrapper_pid.
-# This number is monotonic and we can't have two simultaneously running wrappers
-# with the same PID.
-wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))
-
-cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
-os.makedirs(cmd_to_run[logdir_idx + 1])
-
-if testcase_name:
- f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
- print >>f, testcase_name
- f.close()
-
-exit(subprocess.call(cmd_to_run))
diff --git a/chromium/tools/valgrind/drmemory.bat b/chromium/tools/valgrind/drmemory.bat
deleted file mode 100755
index fe911e4c328..00000000000
--- a/chromium/tools/valgrind/drmemory.bat
+++ /dev/null
@@ -1,5 +0,0 @@
-@echo off
-:: Copyright (c) 2011 The Chromium Authors. All rights reserved.
-:: Use of this source code is governed by a BSD-style license that can be
-:: found in the LICENSE file.
-%~dp0\chrome_tests.bat -t cmdline --tool drmemory %*
diff --git a/chromium/tools/valgrind/drmemory_analyze.py b/chromium/tools/valgrind/drmemory_analyze.py
deleted file mode 100755
index 29fc0ed4b0c..00000000000
--- a/chromium/tools/valgrind/drmemory_analyze.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2011 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# drmemory_analyze.py
-
-''' Given a Dr. Memory output file, parses errors and uniques them.'''
-
-from collections import defaultdict
-import common
-import hashlib
-import logging
-import optparse
-import os
-import re
-import subprocess
-import sys
-import time
-
-class DrMemoryError:
- def __init__(self, report, suppression, testcase):
- self._report = report
- self._testcase = testcase
-
- # Chromium-specific transformations of the suppressions:
- # Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
- # Dr.Memory-generated error ids from the name= lines as they don't
- # make sense in a multiprocess report.
- supp_lines = suppression.split("\n")
- for l in xrange(len(supp_lines)):
- if supp_lines[l].startswith("name="):
- supp_lines[l] = "name=<insert_a_suppression_name_here>"
- if supp_lines[l].startswith("chrome.dll!"):
- supp_lines[l] = supp_lines[l].replace("chrome.dll!", "*!")
- bang_index = supp_lines[l].find("!")
- d_exe_index = supp_lines[l].find(".exe!")
- if bang_index >= 4 and d_exe_index + 4 == bang_index:
- supp_lines[l] = "*" + supp_lines[l][bang_index:]
- self._suppression = "\n".join(supp_lines)
-
- def __str__(self):
- output = ""
- output += "### BEGIN MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
- self.ErrorHash()
- output += self._report + "\n"
- if self._testcase:
- output += "The report came from the `%s` test.\n" % self._testcase
- output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
- output += (" For more info on using suppressions see "
- "http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
- output += "{\n%s\n}\n" % self._suppression
- output += "### END MEMORY TOOL REPORT (error hash=#%016X#)\n" % \
- self.ErrorHash()
- return output
-
- # This is a device-independent hash identifying the suppression.
- # By printing out this hash we can find duplicate reports between tests and
- # different shards running on multiple buildbots
- def ErrorHash(self):
- return int(hashlib.md5(self._suppression).hexdigest()[:16], 16)
-
- def __hash__(self):
- return hash(self._suppression)
-
- def __eq__(self, rhs):
- return self._suppression == rhs
-
-
-class DrMemoryAnalyzer:
- ''' Given a set of Dr.Memory output files, parse all the errors out of
- them, unique them and output the results.'''
-
- def __init__(self):
- self.known_errors = set()
- self.error_count = 0;
-
- def ReadLine(self):
- self.line_ = self.cur_fd_.readline()
-
- def ReadSection(self):
- result = [self.line_]
- self.ReadLine()
- while len(self.line_.strip()) > 0:
- result.append(self.line_)
- self.ReadLine()
- return result
-
- def ParseReportFile(self, filename, testcase):
- ret = []
-
- # First, read the generated suppressions file so we can easily lookup a
- # suppression for a given error.
- supp_fd = open(filename.replace("results", "suppress"), 'r')
- generated_suppressions = {} # Key -> Error #, Value -> Suppression text.
- for line in supp_fd:
- # NOTE: this regexp looks fragile. Might break if the generated
- # suppression format slightly changes.
- m = re.search("# Suppression for Error #([0-9]+)", line.strip())
- if not m:
- continue
- error_id = int(m.groups()[0])
- assert error_id not in generated_suppressions
- # OK, now read the next suppression:
- cur_supp = ""
- for supp_line in supp_fd:
- if supp_line.startswith("#") or supp_line.strip() == "":
- break
- cur_supp += supp_line
- generated_suppressions[error_id] = cur_supp.strip()
- supp_fd.close()
-
- self.cur_fd_ = open(filename, 'r')
- while True:
- self.ReadLine()
- if (self.line_ == ''): break
-
- match = re.search("^Error #([0-9]+): (.*)", self.line_)
- if match:
- error_id = int(match.groups()[0])
- self.line_ = match.groups()[1].strip() + "\n"
- report = "".join(self.ReadSection()).strip()
- suppression = generated_suppressions[error_id]
- ret.append(DrMemoryError(report, suppression, testcase))
-
- if re.search("SUPPRESSIONS USED:", self.line_):
- self.ReadLine()
- while self.line_.strip() != "":
- line = self.line_.strip()
- (count, name) = re.match(" *([0-9\?]+)x(?: \(.*?\))?: (.*)",
- line).groups()
- if (count == "?"):
- # Whole-module have no count available: assume 1
- count = 1
- else:
- count = int(count)
- self.used_suppressions[name] += count
- self.ReadLine()
-
- if self.line_.startswith("ASSERT FAILURE"):
- ret.append(self.line_.strip())
-
- self.cur_fd_.close()
- return ret
-
- def Report(self, filenames, testcase, check_sanity):
- sys.stdout.flush()
- # TODO(timurrrr): support positive tests / check_sanity==True
- self.used_suppressions = defaultdict(int)
-
- to_report = []
- reports_for_this_test = set()
- for f in filenames:
- cur_reports = self.ParseReportFile(f, testcase)
-
- # Filter out the reports that were there in previous tests.
- for r in cur_reports:
- if r in reports_for_this_test:
- # A similar report is about to be printed for this test.
- pass
- elif r in self.known_errors:
- # A similar report has already been printed in one of the prev tests.
- to_report.append("This error was already printed in some "
- "other test, see 'hash=#%016X#'" % r.ErrorHash())
- reports_for_this_test.add(r)
- else:
- self.known_errors.add(r)
- reports_for_this_test.add(r)
- to_report.append(r)
-
- common.PrintUsedSuppressionsList(self.used_suppressions)
-
- if not to_report:
- logging.info("PASS: No error reports found")
- return 0
-
- sys.stdout.flush()
- sys.stderr.flush()
- logging.info("Found %i error reports" % len(to_report))
- for report in to_report:
- self.error_count += 1
- logging.info("Report #%d\n%s" % (self.error_count, report))
- logging.info("Total: %i error reports" % len(to_report))
- sys.stdout.flush()
- return -1
-
-
-def main():
- '''For testing only. The DrMemoryAnalyze class should be imported instead.'''
- parser = optparse.OptionParser("usage: %prog <files to analyze>")
-
- (options, args) = parser.parse_args()
- if len(args) == 0:
- parser.error("no filename specified")
- filenames = args
-
- logging.getLogger().setLevel(logging.INFO)
- return DrMemoryAnalyzer().Report(filenames, None, False)
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/chromium/tools/valgrind/gtest_exclude/unit_tests.gtest_linux.txt b/chromium/tools/valgrind/gtest_exclude/unit_tests.gtest_linux.txt
deleted file mode 100644
index e30315a7167..00000000000
--- a/chromium/tools/valgrind/gtest_exclude/unit_tests.gtest_linux.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-# http://crbug.com/336349
-NTPUserDataLoggerTest.TestLogging
-
-# http://crbug.com/403533
-ExtensionPathUtilTest.BasicPrettifyPathTest
-
-# http://crbug.com/523600
-ClientCertStoreChromeOSTest.Filter
-ClientCertStoreChromeOSTest.RequestsAfterNSSInitSucceed
-ClientCertStoreChromeOSTest.CertRequestMatching
diff --git a/chromium/tools/valgrind/suppressions.py b/chromium/tools/valgrind/suppressions.py
index e066bedead1..2653753f817 100755
--- a/chromium/tools/valgrind/suppressions.py
+++ b/chromium/tools/valgrind/suppressions.py
@@ -55,11 +55,6 @@ def GetSuppressions():
vg_mac = ReadSuppressionsFromFile(supp_filename)
result['mac_suppressions'] = vg_mac
- supp_filename = JOIN(suppressions_root, "drmemory", "suppressions.txt")
- result['drmem_suppressions'] = ReadSuppressionsFromFile(supp_filename)
- supp_filename = JOIN(suppressions_root, "drmemory", "suppressions_full.txt")
- result['drmem_full_suppressions'] = ReadSuppressionsFromFile(supp_filename)
-
return result
diff --git a/chromium/tools/valgrind/test_suppressions.py b/chromium/tools/valgrind/test_suppressions.py
index 3ad374d86ad..d4aaf354ec4 100755
--- a/chromium/tools/valgrind/test_suppressions.py
+++ b/chromium/tools/valgrind/test_suppressions.py
@@ -145,10 +145,6 @@ def main(argv):
cur_supp += supp['mac_suppressions']
elif all([re.search("Linux%20", url) for url in all_reports[r]]):
cur_supp += supp['linux_suppressions']
- if all(["DrMemory" in url for url in all_reports[r]]):
- cur_supp += supp['drmem_suppressions']
- if all(["DrMemory%20full" in url for url in all_reports[r]]):
- cur_supp += supp['drmem_full_suppressions']
# Test if this report is already suppressed
skip = False
diff --git a/chromium/tools/variations/fieldtrial_to_struct_unittest.py b/chromium/tools/variations/fieldtrial_to_struct_unittest.py
index 4f10bacb038..b08f0245702 100644
--- a/chromium/tools/variations/fieldtrial_to_struct_unittest.py
+++ b/chromium/tools/variations/fieldtrial_to_struct_unittest.py
@@ -208,20 +208,22 @@ class FieldTrialToStruct(unittest.TestCase):
self.assertEqual(expected, result)
def test_FieldTrialToStructMain(self):
- schema = ('../../components/variations/field_trial_config/'
- 'field_trial_testing_config_schema.json')
+ schema = (os.path.dirname(__file__) +
+ '/../../components/variations/field_trial_config/'
+ 'field_trial_testing_config_schema.json')
+ unittest_data_dir = os.path.dirname(__file__) + '/unittest_data/'
test_output_filename = 'test_output'
fieldtrial_to_struct.main([
'--schema=' + schema,
'--output=' + test_output_filename,
'--platform=win',
'--year=2015',
- 'unittest_data/test_config.json'
+ unittest_data_dir + 'test_config.json'
])
header_filename = test_output_filename + '.h'
with open(header_filename, 'r') as header:
test_header = header.read()
- with open('unittest_data/expected_output.h', 'r') as expected:
+ with open(unittest_data_dir + 'expected_output.h', 'r') as expected:
expected_header = expected.read()
self.assertEqual(expected_header, test_header)
os.unlink(header_filename)
@@ -229,7 +231,7 @@ class FieldTrialToStruct(unittest.TestCase):
cc_filename = test_output_filename + '.cc'
with open(cc_filename, 'r') as cc:
test_cc = cc.read()
- with open('unittest_data/expected_output.cc', 'r') as expected:
+ with open(unittest_data_dir + 'expected_output.cc', 'r') as expected:
expected_cc = expected.read()
self.assertEqual(expected_cc, test_cc)
os.unlink(cc_filename)
diff --git a/chromium/tools/web_dev_style/js_checker.py b/chromium/tools/web_dev_style/js_checker.py
index 46bc62f286a..117979ea09f 100644
--- a/chromium/tools/web_dev_style/js_checker.py
+++ b/chromium/tools/web_dev_style/js_checker.py
@@ -86,11 +86,11 @@ class JSChecker(object):
return [self.output_api.PresubmitError(output)] if output else []
- def VarNameCheck(self, i, line):
+ def VariableNameCheck(self, i, line):
"""See the style guide. http://goo.gl/eQiXVW"""
return self.RegexCheck(i, line,
- r"var (?!g_\w+)(_?[a-z][a-zA-Z]*[_$][\w_$]*)(?<! \$)",
- "Please use var namesLikeThis <https://goo.gl/eQiXVW>")
+ r"(?:var|let|const) (?!g_\w+)(_?[a-z][a-zA-Z]*[_$][\w_$]*)(?<! \$)",
+ "Please use variable namesLikeThis <https://goo.gl/eQiXVW>")
def _GetErrorHighlight(self, start, length):
"""Takes a start position and a length, and produces a row of '^'s to
@@ -123,7 +123,7 @@ class JSChecker(object):
self.ExtraDotInGenericCheck(i, line),
self.InheritDocCheck(i, line),
self.PolymerLocalIdCheck(i, line),
- self.VarNameCheck(i, line),
+ self.VariableNameCheck(i, line),
])
if error_lines:
diff --git a/chromium/tools/web_dev_style/js_checker_test.py b/chromium/tools/web_dev_style/js_checker_test.py
index a481a756ced..b5a4cedc019 100755
--- a/chromium/tools/web_dev_style/js_checker_test.py
+++ b/chromium/tools/web_dev_style/js_checker_test.py
@@ -17,6 +17,9 @@ import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
+_DECLARATION_METHODS = 'const', 'let', 'var'
+
+
class JsCheckerTest(SuperMoxTestBase):
def setUp(self):
SuperMoxTestBase.setUp(self)
@@ -208,46 +211,48 @@ class JsCheckerTest(SuperMoxTestBase):
for line in lines:
self.ShouldPassPolymerLocalIdCheck(line)
- def ShouldFailVarNameCheck(self, line):
+ def ShouldFailVariableNameCheck(self, line):
"""Checks that var unix_hacker, $dollar are style errors."""
- error = self.checker.VarNameCheck(1, line)
+ error = self.checker.VariableNameCheck(1, line)
self.assertNotEqual('', error,
msg='Should be flagged as style error: ' + line)
highlight = test_util.GetHighlight(line, error)
- self.assertFalse('var ' in highlight);
+ self.assertFalse(any(dm in highlight for dm in _DECLARATION_METHODS))
- def ShouldPassVarNameCheck(self, line):
+ def ShouldPassVariableNameCheck(self, line):
"""Checks that variableNamesLikeThis aren't style errors."""
- self.assertEqual('', self.checker.VarNameCheck(1, line),
+ self.assertEqual('', self.checker.VariableNameCheck(1, line),
msg='Should not be flagged as style error: ' + line)
- def testVarNameFails(self):
+ def testVariableNameFails(self):
lines = [
- "var private_;",
- "var hostName_ = 'https://google.com';",
- " var _super_private",
- " var unix_hacker = someFunc();",
+ "%s private_;",
+ "%s hostName_ = 'https://google.com';",
+ " %s _super_private",
+ " %s unix_hacker = someFunc();",
]
for line in lines:
- self.ShouldFailVarNameCheck(line)
+ for declaration_method in _DECLARATION_METHODS:
+ self.ShouldFailVariableNameCheck(line % declaration_method)
- def testVarNamePasses(self):
+ def testVariableNamePasses(self):
lines = [
- " var namesLikeThis = [];",
- " for (var i = 0; i < 10; ++i) { ",
- "for (var i in obj) {",
- " var one, two, three;",
- " var magnumPI = {};",
- " var g_browser = 'da browzer';",
- "/** @const */ var Bla = options.Bla;", # goog.scope() replacement.
- " var $ = function() {", # For legacy reasons.
- " var StudlyCaps = cr.define('bla')", # Classes.
- " var SCARE_SMALL_CHILDREN = [", # TODO(dbeam): add @const in
+ " %s namesLikeThis = [];",
+ " for (%s i = 0; i < 10; ++i) { ",
+ "for (%s i in obj) {",
+ " %s one, two, three;",
+ " %s magnumPI = {};",
+ " %s g_browser = 'da browzer';",
+ "/** @const */ %s Bla = options.Bla;", # goog.scope() replacement.
+ " %s $ = function() {", # For legacy reasons.
+ " %s StudlyCaps = cr.define('bla')", # Classes.
+ " %s SCARE_SMALL_CHILDREN = [", # TODO(dbeam): add @const in
# front of all these vars like
- "/** @const */ CONST_VAR = 1;", # this line has (<--).
+ # "/** @const */ %s CONST_VAR = 1;", # this line has (<--).
]
for line in lines:
- self.ShouldPassVarNameCheck(line)
+ for declaration_method in _DECLARATION_METHODS:
+ self.ShouldPassVariableNameCheck(line % declaration_method)
if __name__ == '__main__':
diff --git a/chromium/tools/win/DebugVisualizers/webkit.natvis b/chromium/tools/win/DebugVisualizers/webkit.natvis
index 8180fc7b6aa..6001763bca0 100644
--- a/chromium/tools/win/DebugVisualizers/webkit.natvis
+++ b/chromium/tools/win/DebugVisualizers/webkit.natvis
@@ -34,7 +34,7 @@
<Item Name="Length">length_</Item>
<Item Name="Hash">hash_</Item>
<Item Name="AsciiText" Condition="is8_bit_">(this+1),[length_]s</Item>
- <Item Name="UnicodeText" Condition="!is8_bit_">(this+1),[length_]su</Item>
+ <Item Name="UnicodeText" Condition="!is8_bit_">(wchar_t*)(this+1),[length_]su</Item>
</Expand>
</Type>
<Type Name="WTF::AtomicString">
@@ -211,14 +211,12 @@
</Type>
<!-- Layout: LayoutNG -->
<Type Name="blink::NGBlockNode">
- <DisplayString>{layout_box_}</DisplayString>
+ <DisplayString>{*box_}</DisplayString>
</Type>
<Type Name="blink::NGInlineNode">
- <DisplayString>{*start_inline_}</DisplayString>
+ <DisplayString>{*box_}</DisplayString>
<Expand>
- <Item Name="inline_node_data">*block_->ng_inline_node_data_</Item>
- <Item Name="text_content">block_->ng_inline_node_data_->text_content_</Item>
- <Item Name="items">block_->ng_inline_node_data_->items_</Item>
+ <Item Name="inline_node_data">*((blink::LayoutNGBlockFlow*)box_)->ng_inline_node_data_</Item>
</Expand>
</Type>
<Type Name="blink::NGInlineItem">
@@ -276,6 +274,6 @@
<DisplayString>{platform_data_}</DisplayString>
</Type>
<Type Name="blink::FontPlatformData">
- <DisplayString>{*typeface_.ptr_}, {text_size_}px</DisplayString>
+ <DisplayString>{*typeface_.fPtr}, {text_size_}px</DisplayString>
</Type>
</AutoVisualizer> \ No newline at end of file