author     Zeno Albisser <zeno.albisser@digia.com>  2013-08-15 21:46:11 +0200
committer  Zeno Albisser <zeno.albisser@digia.com>  2013-08-15 21:46:11 +0200
commit     679147eead574d186ebf3069647b4c23e8ccace6 (patch)
tree       fc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /ninja
download   qtwebengine-chromium-679147eead574d186ebf3069647b4c23e8ccace6.tar.gz
Initial import.
Diffstat (limited to 'ninja')
-rw-r--r--  ninja/.travis.yml  8
-rw-r--r--  ninja/COPYING  202
-rw-r--r--  ninja/HACKING.md  180
-rw-r--r--  ninja/README  16
-rw-r--r--  ninja/RELEASING  12
-rwxr-xr-x  ninja/bootstrap.py  163
-rwxr-xr-x  ninja/configure.py  447
-rw-r--r--  ninja/doc/docbook.xsl  17
-rw-r--r--  ninja/doc/doxygen.config  1250
-rw-r--r--  ninja/doc/manual.asciidoc  916
-rw-r--r--  ninja/doc/style.css  35
-rw-r--r--  ninja/misc/bash-completion  40
-rw-r--r--  ninja/misc/inherited-fds.ninja  23
-rw-r--r--  ninja/misc/long-slow-build.ninja  38
-rwxr-xr-x  ninja/misc/measure.py  54
-rw-r--r--  ninja/misc/ninja-mode.el  42
-rw-r--r--  ninja/misc/ninja.vim  81
-rw-r--r--  ninja/misc/ninja_syntax.py  158
-rwxr-xr-x  ninja/misc/ninja_syntax_test.py  152
-rw-r--r--  ninja/misc/packaging/ninja.spec  42
-rwxr-xr-x  ninja/misc/packaging/rpmbuild.sh  29
-rw-r--r--  ninja/misc/zsh-completion  21
-rw-r--r--  ninja/platform_helper.py  71
-rw-r--r--  ninja/src/browse.cc  65
-rw-r--r--  ninja/src/browse.h  27
-rwxr-xr-x  ninja/src/browse.py  197
-rw-r--r--  ninja/src/build.cc  848
-rw-r--r--  ninja/src/build.h  279
-rw-r--r--  ninja/src/build_log.cc  383
-rw-r--r--  ninja/src/build_log.h  86
-rw-r--r--  ninja/src/build_log_perftest.cc  144
-rw-r--r--  ninja/src/build_log_test.cc  264
-rw-r--r--  ninja/src/build_test.cc  1752
-rw-r--r--  ninja/src/canon_perftest.cc  56
-rw-r--r--  ninja/src/clean.cc  259
-rw-r--r--  ninja/src/clean.h  107
-rw-r--r--  ninja/src/clean_test.cc  374
-rw-r--r--  ninja/src/depfile_parser.cc  219
-rw-r--r--  ninja/src/depfile_parser.h  35
-rw-r--r--  ninja/src/depfile_parser.in.cc  116
-rw-r--r--  ninja/src/depfile_parser_test.cc  139
-rw-r--r--  ninja/src/deps_log.cc  335
-rw-r--r--  ninja/src/deps_log.h  110
-rw-r--r--  ninja/src/deps_log_test.cc  384
-rw-r--r--  ninja/src/disk_interface.cc  177
-rw-r--r--  ninja/src/disk_interface.h  70
-rw-r--r--  ninja/src/disk_interface_test.cc  207
-rw-r--r--  ninja/src/edit_distance.cc  66
-rw-r--r--  ninja/src/edit_distance.h  25
-rw-r--r--  ninja/src/edit_distance_test.cc  48
-rw-r--r--  ninja/src/eval_env.cc  80
-rw-r--r--  ninja/src/eval_env.h  78
-rw-r--r--  ninja/src/exit_status.h  24
-rw-r--r--  ninja/src/explain.cc  15
-rw-r--r--  ninja/src/explain.h  27
-rwxr-xr-x  ninja/src/gen_doxygen_mainpage.sh  92
-rw-r--r--  ninja/src/getopt.c  408
-rw-r--r--  ninja/src/getopt.h  55
-rw-r--r--  ninja/src/graph.cc  451
-rw-r--r--  ninja/src/graph.h  266
-rw-r--r--  ninja/src/graph_test.cc  228
-rw-r--r--  ninja/src/graphviz.cc  77
-rw-r--r--  ninja/src/graphviz.h  34
-rw-r--r--  ninja/src/hash_collision_bench.cc  62
-rw-r--r--  ninja/src/hash_map.h  109
-rw-r--r--  ninja/src/includes_normalize-win32.cc  115
-rw-r--r--  ninja/src/includes_normalize.h  35
-rw-r--r--  ninja/src/includes_normalize_test.cc  104
-rwxr-xr-x  ninja/src/inline.sh  25
-rw-r--r--  ninja/src/lexer.cc  816
-rw-r--r--  ninja/src/lexer.h  105
-rw-r--r--  ninja/src/lexer.in.cc  264
-rw-r--r--  ninja/src/lexer_test.cc  97
-rw-r--r--  ninja/src/line_printer.cc  109
-rw-r--r--  ninja/src/line_printer.h  52
-rw-r--r--  ninja/src/manifest_parser.cc  379
-rw-r--r--  ninja/src/manifest_parser.h  69
-rw-r--r--  ninja/src/manifest_parser_test.cc  740
-rw-r--r--  ninja/src/metrics.cc  125
-rw-r--r--  ninja/src/metrics.h  92
-rw-r--r--  ninja/src/minidump-win32.cc  88
-rw-r--r--  ninja/src/msvc_helper-win32.cc  185
-rw-r--r--  ninja/src/msvc_helper.h  62
-rw-r--r--  ninja/src/msvc_helper_main-win32.cc  135
-rw-r--r--  ninja/src/msvc_helper_test.cc  112
-rw-r--r--  ninja/src/ninja.cc  1030
-rw-r--r--  ninja/src/ninja_test.cc  88
-rw-r--r--  ninja/src/parser_perftest.cc  77
-rw-r--r--  ninja/src/state.cc  222
-rw-r--r--  ninja/src/state.h  134
-rw-r--r--  ninja/src/state_test.cc  47
-rw-r--r--  ninja/src/string_piece.h  53
-rw-r--r--  ninja/src/subprocess-posix.cc  287
-rw-r--r--  ninja/src/subprocess-win32.cc  280
-rw-r--r--  ninja/src/subprocess.h  98
-rw-r--r--  ninja/src/subprocess_test.cc  197
-rw-r--r--  ninja/src/test.cc  186
-rw-r--r--  ninja/src/test.h  98
-rw-r--r--  ninja/src/timestamp.h  24
-rw-r--r--  ninja/src/util.cc  379
-rw-r--r--  ninja/src/util.h  100
-rw-r--r--  ninja/src/util_test.cc  165
-rw-r--r--  ninja/src/version.cc  53
-rw-r--r--  ninja/src/version.h  32
-rw-r--r--  ninja/src/win32port.h  31
105 files changed, 20065 insertions, 0 deletions
diff --git a/ninja/.travis.yml b/ninja/.travis.yml
new file mode 100644
index 00000000000..d7bee6f54f5
--- /dev/null
+++ b/ninja/.travis.yml
@@ -0,0 +1,8 @@
+language: cpp
+compiler:
+ - gcc
+ - clang
+before_install:
+ - sudo apt-get update -qq
+ - sudo apt-get install libgtest-dev
+script: ./bootstrap.py && ./configure.py --with-gtest=/usr/src/gtest && ./ninja ninja_test && ./ninja_test --gtest_filter=-SubprocessTest.SetWithLots
diff --git a/ninja/COPYING b/ninja/COPYING
new file mode 100644
index 00000000000..131cb1da46d
--- /dev/null
+++ b/ninja/COPYING
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ninja/HACKING.md b/ninja/HACKING.md
new file mode 100644
index 00000000000..8e1696ac7c6
--- /dev/null
+++ b/ninja/HACKING.md
@@ -0,0 +1,180 @@
+## Basic overview
+
+`./configure.py` generates the `build.ninja` files used to build
+ninja. It accepts various flags to adjust build parameters.
+
+The primary build target of interest is `ninja`, but when hacking on
+Ninja your changes should be testable so it's more useful to build
+and run `ninja_test` when developing.
+
+(`./bootstrap.py` creates a bootstrap `ninja` and runs the above
+process; it's only necessary to run if you don't have a copy of
+`ninja` to build with.)
+
+### Adjusting build flags
+
+Build in "debug" mode while developing (disables optimizations and builds
+way faster on Windows):
+
+ ./configure.py --debug
+
+To use clang, set `CXX`:
+
+ CXX=clang++ ./configure.py
+
+## How to successfully make changes to Ninja
+
+Github pull requests are convenient for me to merge (I can just click
+a button and it's all handled server-side), but I'm also comfortable
+accepting pre-github git patches (via `send-email` etc.).
+
+Good pull requests have all of these attributes:
+
+* Are scoped to one specific issue
+* Include a test to demonstrate their correctness
+* Update the docs where relevant
+* Match the Ninja coding style (see below)
+* Don't include a mess of "oops, fix typo" commits
+
+These are typically merged without hesitation. If a change is lacking
+any of the above I usually will ask you to fix it, though there are
+obvious exceptions (fixing typos in comments doesn't need tests).
+
+I am very wary of changes that increase the complexity of Ninja (in
+particular, new build file syntax or command-line flags) or increase
+the maintenance burden of Ninja. Ninja is already successfully in use
+by hundreds of developers for large projects and it already achieves
+(most of) the goals I set out for it to do. It's probably best to
+discuss new feature ideas on the mailing list before I shoot down your
+patch.
+
+## Testing
+
+### Installing gtest
+
+The `ninja_test` binary, containing all the tests, depends on the
+googletest (gtest) library.
+
+* On older Ubuntus it'll install as libraries into `/usr/lib`:
+
+ apt-get install libgtest
+
+* On newer Ubuntus it's only distributed as source
+
+ apt-get install libgtest-dev
+ ./configure.py --with-gtest=/usr/src/gtest
+
+* Otherwise you need to download it, unpack it, and pass
+ `--with-gtest` to `configure.py`. Get it from [its downloads
+ page](http://code.google.com/p/googletest/downloads/list); [this
+ direct download link might work
+ too](http://googletest.googlecode.com/files/gtest-1.6.0.zip).
+
+### Test-driven development
+
+Set your build command to
+
+ ./ninja ninja_test && ./ninja_test --gtest_filter=MyTest.Name
+
+now you can repeatedly run that while developing until the tests pass
+(I frequently set it as my compilation command in Emacs). Remember to
+build "all" before committing to verify the other source still works!
+
+## Testing performance impact of changes
+
+If you have a Chrome build handy, it's a good test case. Otherwise,
+[the github downloads page](https://github.com/martine/ninja/downloads)
+has a copy of the Chrome build files (and depfiles). You can untar
+that, then run
+
+ path/to/my/ninja chrome
+
+and compare that against a baseline Ninja.
+
+There's a script at `misc/measure.py` that repeatedly runs a command like
+the above (to address variance) and summarizes its runtime. E.g.
+
+ path/to/misc/measure.py path/to/my/ninja chrome
+
+For changing the depfile parser, you can also build `parser_perftest`
+and run that directly on some representative input files.
+
+## Coding guidelines
+
+Generally it's the [Google C++ coding style][], but in brief:
+
+* Function names are camelcase.
+* Member methods are camelcase, except for trivial getters which are
+ underscore separated.
+* Local variables are underscore separated.
+* Member variables are underscore separated and suffixed by an extra
+ underscore.
+* Two-space indentation.
+* Opening braces go at the end of the line.
+* Lines are 80 columns maximum.
+* All source files should have the Google Inc. license header.
+
+[Google C++ coding style]: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
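+
+A small invented sketch of those rules in practice (the class and names
+here are made up purely for illustration, not taken from the ninja
+sources):
+
+ // ExampleCounter exists only to demonstrate the style rules above.
+ struct ExampleCounter {
+   // Member methods are camelcase.
+   void IncrementBy(int amount) {
+     total_count_ += amount;  // Member variables end in an extra underscore.
+   }
+
+   // Trivial getter, so it is underscore separated.
+   int total_count() const { return total_count_; }
+
+   int total_count_;
+ };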
+
+## Documentation
+
+### Style guidelines
+
+* Use `///` for doxygen.
+* Use `\a` to refer to arguments.
+* It's not necessary to document each argument, especially when they're
+ relatively self-evident (e.g. in `CanonicalizePath(string* path, string* err)`,
+ the arguments are hopefully obvious).
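+
+As a hypothetical example of that style (the function below is invented
+just to show `///` and `\a` in use):
+
+ /// Strip the build directory prefix from \a path, returning false and
+ /// filling \a err if \a path does not start with it.
+ bool StripBuildDirPrefix(string* path, string* err);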
+
+### Building the manual
+
+ sudo apt-get install asciidoc --no-install-recommends
+ ./ninja manual
+
+### Building the code documentation
+
+ sudo apt-get install doxygen
+ ./ninja doxygen
+
+## Building for Windows
+
+While developing, it's helpful to copy `ninja.exe` to another name like
+`n.exe`; otherwise, rebuilds will be unable to write `ninja.exe` because
+it's locked while in use.
+
+### Via Visual Studio
+
+* Install Visual Studio (Express is fine), [Python for Windows][],
+ and (if making changes) googletest (see above instructions)
+* In a Visual Studio command prompt: `python bootstrap.py`
+
+[Python for Windows]: http://www.python.org/getit/windows/
+
+### Via mingw on Windows (not well supported)
+
+* Install mingw, msys, and python
+* In the mingw shell, put Python in your path, and `python bootstrap.py`
+* To reconfigure, run `python configure.py`
+* Remember to strip the resulting executable if size matters to you
+
+### Via mingw on Linux (not well supported)
+
+Setup on Ubuntu Lucid:
+* `sudo apt-get install gcc-mingw32 wine`
+* `export CC=i586-mingw32msvc-cc CXX=i586-mingw32msvc-c++ AR=i586-mingw32msvc-ar`
+
+Setup on Ubuntu Precise:
+* `sudo apt-get install gcc-mingw-w64-i686 g++-mingw-w64-i686 wine`
+* `export CC=i686-w64-mingw32-gcc CXX=i686-w64-mingw32-g++ AR=i686-w64-mingw32-ar`
+
+Then run:
+* `./configure.py --platform=mingw --host=linux`
+* Build `ninja.exe` using a Linux ninja binary: `/path/to/linux/ninja`
+* Run: `./ninja.exe` (implicitly runs through wine(!))
+
+### Using Microsoft compilers on Linux (extremely flaky)
+
+The trick is to install just the compilers, and not all of Visual Studio,
+by following [these instructions][win7sdk].
+
+[win7sdk]: http://www.kegel.com/wine/cl-howto-win7sdk.html
diff --git a/ninja/README b/ninja/README
new file mode 100644
index 00000000000..733ccb39b54
--- /dev/null
+++ b/ninja/README
@@ -0,0 +1,16 @@
+Ninja is a small build system with a focus on speed.
+http://martine.github.com/ninja/
+
+See the manual -- http://martine.github.com/ninja/manual.html or
+doc/manual.asciidoc included in the distribution -- for background
+and more details.
+
+To build, run ./bootstrap.py. It first blindly compiles all non-test
+source files together, then re-builds Ninja using itself. You should
+end up with a 'ninja' binary in the source root. Run './ninja -h' for
+help.
+
+There is no installation step. The only file of interest to a user
+is the resulting ninja binary.
+
+If you're interested in making changes to Ninja, read HACKING.md first.
diff --git a/ninja/RELEASING b/ninja/RELEASING
new file mode 100644
index 00000000000..1110f0b57b0
--- /dev/null
+++ b/ninja/RELEASING
@@ -0,0 +1,12 @@
+Notes to myself on all the steps to make for a Ninja release.
+
+1. update src/version.cc with new version (with ".git")
+2. git checkout release; git merge master
+3. fix version number in src/version.cc (it will likely conflict in the above)
+4. fix version in doc/manual.asciidoc
+5. rebuild manual, put in place on website
+6. commit, tag, push (don't forget to push --tags)
+7. construct release notes from prior notes
+ credits: git shortlog -s --no-merges REV..
+8. update home page mention of latest version.
+
diff --git a/ninja/bootstrap.py b/ninja/bootstrap.py
new file mode 100755
index 00000000000..5682bf1d250
--- /dev/null
+++ b/ninja/bootstrap.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+from optparse import OptionParser
+import sys
+import os
+import glob
+import errno
+import shlex
+import shutil
+import subprocess
+import platform_helper
+
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+parser = OptionParser()
+
+parser.add_option('--verbose', action='store_true',
+ help='enable verbose build',)
+parser.add_option('--x64', action='store_true',
+ help='force 64-bit build (Windows)',)
+parser.add_option('--platform',
+ help='target platform (' + '/'.join(platform_helper.platforms()) + ')',
+ choices=platform_helper.platforms())
+parser.add_option('--force-pselect', action='store_true',
+ help="ppoll() is used by default on Linux and OpenBSD, but older versions might need to use pselect instead",)
+(options, conf_args) = parser.parse_args()
+
+
+platform = platform_helper.Platform(options.platform)
+conf_args.append("--platform=" + platform.platform())
+
+def run(*args, **kwargs):
+ returncode = subprocess.call(*args, **kwargs)
+ if returncode != 0:
+ sys.exit(returncode)
+
+# Compute system-specific CFLAGS/LDFLAGS as used both in the g++ call
+# below and in the later configure.py invocation.
+cflags = os.environ.get('CFLAGS', '').split()
+ldflags = os.environ.get('LDFLAGS', '').split()
+if platform.is_freebsd() or platform.is_openbsd():
+ cflags.append('-I/usr/local/include')
+ ldflags.append('-L/usr/local/lib')
+
+print('Building ninja manually...')
+
+try:
+ os.mkdir('build')
+except OSError:
+ e = sys.exc_info()[1]
+ if e.errno != errno.EEXIST:
+ raise
+
+sources = []
+for src in glob.glob('src/*.cc'):
+ if src.endswith('test.cc') or src.endswith('.in.cc'):
+ continue
+ if src.endswith('bench.cc'):
+ continue
+
+ filename = os.path.basename(src)
+ if filename == 'browse.cc': # Depends on generated header.
+ continue
+
+ if platform.is_windows():
+ if src.endswith('-posix.cc'):
+ continue
+ else:
+ if src.endswith('-win32.cc'):
+ continue
+
+ sources.append(src)
+
+if platform.is_windows():
+ sources.append('src/getopt.c')
+
+if platform.is_msvc():
+ cl = 'cl'
+ vcdir = os.environ.get('VCINSTALLDIR')
+ if vcdir:
+ if options.x64:
+ cl = os.path.join(vcdir, 'bin', 'x86_amd64', 'cl.exe')
+ if not os.path.exists(cl):
+ cl = os.path.join(vcdir, 'bin', 'amd64', 'cl.exe')
+ else:
+ cl = os.path.join(vcdir, 'bin', 'cl.exe')
+ args = [cl, '/nologo', '/EHsc', '/DNOMINMAX']
+else:
+ args = shlex.split(os.environ.get('CXX', 'g++'))
+ cflags.extend(['-Wno-deprecated',
+ '-DNINJA_PYTHON="' + sys.executable + '"',
+ '-DNINJA_BOOTSTRAP'])
+ if platform.is_windows():
+ cflags.append('-D_WIN32_WINNT=0x0501')
+ if options.x64:
+ cflags.append('-m64')
+if (platform.is_linux() or platform.is_openbsd()) and not options.force_pselect:
+ cflags.append('-DUSE_PPOLL')
+if options.force_pselect:
+ conf_args.append("--force-pselect")
+args.extend(cflags)
+args.extend(ldflags)
+binary = 'ninja.bootstrap'
+if platform.is_windows():
+ binary = 'ninja.bootstrap.exe'
+args.extend(sources)
+if platform.is_msvc():
+ args.extend(['/link', '/out:' + binary])
+else:
+ args.extend(['-o', binary])
+
+if options.verbose:
+ print(' '.join(args))
+
+try:
+ run(args)
+except:
+ print('Failure running:', args)
+ raise
+
+verbose = []
+if options.verbose:
+ verbose = ['-v']
+
+if platform.is_windows():
+ print('Building ninja using itself...')
+ run([sys.executable, 'configure.py'] + conf_args)
+ run(['./' + binary] + verbose)
+
+ # Copy the new executable over the bootstrap one.
+ shutil.copyfile('ninja.exe', binary)
+
+ # Clean up.
+ for obj in glob.glob('*.obj'):
+ os.unlink(obj)
+
+ print("""
+Done!
+
+Note: to work around Windows file locking, where you can't rebuild an
+in-use binary, run ninja.bootstrap instead of ninja after making any
+changes to ninja itself.""")
+else:
+ print('Building ninja using itself...')
+ run([sys.executable, 'configure.py'] + conf_args)
+ run(['./' + binary] + verbose)
+ os.unlink(binary)
+ print('Done!')
diff --git a/ninja/configure.py b/ninja/configure.py
new file mode 100755
index 00000000000..22eb1e57566
--- /dev/null
+++ b/ninja/configure.py
@@ -0,0 +1,447 @@
+#!/usr/bin/env python
+#
+# Copyright 2001 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Script that generates the build.ninja for ninja itself.
+
+Projects that use ninja themselves should either write a similar script
+or use a meta-build system that supports Ninja output."""
+
+from __future__ import print_function
+
+from optparse import OptionParser
+import os
+import sys
+import platform_helper
+sys.path.insert(0, 'misc')
+
+import ninja_syntax
+
+parser = OptionParser()
+profilers = ['gmon', 'pprof']
+parser.add_option('--platform',
+ help='target platform (' + '/'.join(platform_helper.platforms()) + ')',
+ choices=platform_helper.platforms())
+parser.add_option('--host',
+ help='host platform (' + '/'.join(platform_helper.platforms()) + ')',
+ choices=platform_helper.platforms())
+parser.add_option('--debug', action='store_true',
+ help='enable debugging extras',)
+parser.add_option('--profile', metavar='TYPE',
+ choices=profilers,
+ help='enable profiling (' + '/'.join(profilers) + ')',)
+parser.add_option('--with-gtest', metavar='PATH',
+ help='use gtest unpacked in directory PATH')
+parser.add_option('--with-python', metavar='EXE',
+ help='use EXE as the Python interpreter',
+ default=os.path.basename(sys.executable))
+parser.add_option('--force-pselect', action='store_true',
+ help="ppoll() is used by default on Linux and OpenBSD, but older versions might need to use pselect instead",)
+(options, args) = parser.parse_args()
+if args:
+ print('ERROR: extra unparsed command-line arguments:', args)
+ sys.exit(1)
+
+platform = platform_helper.Platform(options.platform)
+if options.host:
+ host = platform_helper.Platform(options.host)
+else:
+ host = platform
+
+BUILD_FILENAME = 'build.ninja'
+buildfile = open(BUILD_FILENAME, 'w')
+n = ninja_syntax.Writer(buildfile)
+n.comment('This file is used to build ninja itself.')
+n.comment('It is generated by ' + os.path.basename(__file__) + '.')
+n.newline()
+
+n.variable('ninja_required_version', '1.3')
+n.newline()
+
+n.comment('The arguments passed to configure.py, for rerunning it.')
+n.variable('configure_args', ' '.join(sys.argv[1:]))
+env_keys = set(['CXX', 'AR', 'CFLAGS', 'LDFLAGS'])
+configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
+if configure_env:
+ config_str = ' '.join([k + '=' + configure_env[k] for k in configure_env])
+ n.variable('configure_env', config_str + '$ ')
+n.newline()
+
+CXX = configure_env.get('CXX', 'g++')
+objext = '.o'
+if platform.is_msvc():
+ CXX = 'cl'
+ objext = '.obj'
+
+def src(filename):
+ return os.path.join('src', filename)
+def built(filename):
+ return os.path.join('$builddir', filename)
+def doc(filename):
+ return os.path.join('doc', filename)
+def cc(name, **kwargs):
+ return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
+def cxx(name, **kwargs):
+ return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
+def binary(name):
+ if platform.is_windows():
+ exe = name + '.exe'
+ n.build(name, 'phony', exe)
+ return exe
+ return name
+
+n.variable('builddir', 'build')
+n.variable('cxx', CXX)
+if platform.is_msvc():
+ n.variable('ar', 'link')
+else:
+ n.variable('ar', configure_env.get('AR', 'ar'))
+
+if platform.is_msvc():
+ cflags = ['/nologo', # Don't print startup banner.
+ '/Zi', # Create pdb with debug info.
+ '/W4', # Highest warning level.
+ '/WX', # Warnings as errors.
+ '/wd4530', '/wd4100', '/wd4706',
+ '/wd4512', '/wd4800', '/wd4702', '/wd4819',
+ # Disable warnings about passing "this" during initialization.
+ '/wd4355',
+ '/GR-', # Disable RTTI.
+ # Disable size_t -> int truncation warning.
+ # We never have strings or arrays larger than 2**31.
+ '/wd4267',
+ '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
+ '/D_VARIADIC_MAX=10',
+ '/DNINJA_PYTHON="%s"' % options.with_python]
+ ldflags = ['/DEBUG', '/libpath:$builddir']
+ if not options.debug:
+ cflags += ['/Ox', '/DNDEBUG', '/GL']
+ ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
+else:
+ cflags = ['-g', '-Wall', '-Wextra',
+ '-Wno-deprecated',
+ '-Wno-unused-parameter',
+ '-fno-rtti',
+ '-fno-exceptions',
+ '-fvisibility=hidden', '-pipe',
+ '-Wno-missing-field-initializers',
+ '-DNINJA_PYTHON="%s"' % options.with_python]
+ if options.debug:
+ cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
+ cflags.remove('-fno-rtti') # Needed for above pedanticness.
+ else:
+ cflags += ['-O2', '-DNDEBUG']
+ if 'clang' in os.path.basename(CXX):
+ cflags += ['-fcolor-diagnostics']
+ if platform.is_mingw():
+ cflags += ['-D_WIN32_WINNT=0x0501']
+ ldflags = ['-L$builddir']
+libs = []
+
+if platform.is_mingw():
+ cflags.remove('-fvisibility=hidden');
+ ldflags.append('-static')
+elif platform.is_sunos5():
+ cflags.remove('-fvisibility=hidden')
+elif platform.is_msvc():
+ pass
+else:
+ if options.profile == 'gmon':
+ cflags.append('-pg')
+ ldflags.append('-pg')
+ elif options.profile == 'pprof':
+ cflags.append('-fno-omit-frame-pointer')
+ libs.extend(['-Wl,--no-as-needed', '-lprofiler'])
+
+if (platform.is_linux() or platform.is_openbsd()) and not options.force_pselect:
+ cflags.append('-DUSE_PPOLL')
+
+def shell_escape(str):
+ """Escape str such that it's interpreted as a single argument by
+ the shell."""
+
+ # This isn't complete, but it's just enough to make NINJA_PYTHON work.
+ if platform.is_windows():
+ return str
+ if '"' in str:
+ return "'%s'" % str.replace("'", "\\'")
+ return str
+
+if 'CFLAGS' in configure_env:
+ cflags.append(configure_env['CFLAGS'])
+n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
+if 'LDFLAGS' in configure_env:
+ ldflags.append(configure_env['LDFLAGS'])
+n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
+n.newline()
+
+if platform.is_msvc():
+ n.rule('cxx',
+ command='$cxx /showIncludes $cflags -c $in /Fo$out',
+ description='CXX $out',
+ deps='msvc')
+else:
+ n.rule('cxx',
+ command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
+ depfile='$out.d',
+ deps='gcc',
+ description='CXX $out')
+n.newline()
+
+if host.is_msvc():
+ n.rule('ar',
+ command='lib /nologo /ltcg /out:$out $in',
+ description='LIB $out')
+elif host.is_mingw():
+ n.rule('ar',
+ command='cmd /c $ar cqs $out.tmp $in && move /Y $out.tmp $out',
+ description='AR $out')
+else:
+ n.rule('ar',
+ command='rm -f $out && $ar crs $out $in',
+ description='AR $out')
+n.newline()
+
+if platform.is_msvc():
+ n.rule('link',
+ command='$cxx $in $libs /nologo /link $ldflags /out:$out',
+ description='LINK $out')
+else:
+ n.rule('link',
+ command='$cxx $ldflags -o $out $in $libs',
+ description='LINK $out')
+n.newline()
+
+objs = []
+
+if not platform.is_windows() and not platform.is_solaris():
+ n.comment('browse_py.h is used to inline browse.py.')
+ n.rule('inline',
+ command='src/inline.sh $varname < $in > $out',
+ description='INLINE $out')
+ n.build(built('browse_py.h'), 'inline', src('browse.py'),
+ implicit='src/inline.sh',
+ variables=[('varname', 'kBrowsePy')])
+ n.newline()
+
+ objs += cxx('browse', order_only=built('browse_py.h'))
+ n.newline()
+
+n.comment('the depfile parser and ninja lexers are generated using re2c.')
+def has_re2c():
+ import subprocess
+ try:
+ proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
+ return int(proc.communicate()[0], 10) >= 1103
+ except OSError:
+ return False
+if has_re2c():
+ n.rule('re2c',
+ command='re2c -b -i --no-generation-date -o $out $in',
+ description='RE2C $out')
+ # Generate the .cc files in the source directory so we can check them in.
+ n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
+ n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
+else:
+ print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
+ "changes to src/*.in.cc will not affect your build.")
+n.newline()
+
+n.comment('Core source files all build into ninja library.')
+for name in ['build',
+ 'build_log',
+ 'clean',
+ 'depfile_parser',
+ 'deps_log',
+ 'disk_interface',
+ 'edit_distance',
+ 'eval_env',
+ 'explain',
+ 'graph',
+ 'graphviz',
+ 'lexer',
+ 'line_printer',
+ 'manifest_parser',
+ 'metrics',
+ 'state',
+ 'util',
+ 'version']:
+ objs += cxx(name)
+if platform.is_windows():
+ for name in ['subprocess-win32',
+ 'includes_normalize-win32',
+ 'msvc_helper-win32',
+ 'msvc_helper_main-win32']:
+ objs += cxx(name)
+ if platform.is_msvc():
+ objs += cxx('minidump-win32')
+ objs += cc('getopt')
+else:
+ objs += cxx('subprocess-posix')
+if platform.is_msvc():
+ ninja_lib = n.build(built('ninja.lib'), 'ar', objs)
+else:
+ ninja_lib = n.build(built('libninja.a'), 'ar', objs)
+n.newline()
+
+if platform.is_msvc():
+ libs.append('ninja.lib')
+else:
+ libs.append('-lninja')
+
+all_targets = []
+
+n.comment('Main executable is library plus main() function.')
+objs = cxx('ninja')
+ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
+ variables=[('libs', libs)])
+n.newline()
+all_targets += ninja
+
+n.comment('Tests all build into ninja_test executable.')
+
+variables = []
+test_cflags = cflags + ['-DGTEST_HAS_RTTI=0']
+test_ldflags = None
+test_libs = libs
+objs = []
+if options.with_gtest:
+ path = options.with_gtest
+
+ gtest_all_incs = '-I%s -I%s' % (path, os.path.join(path, 'include'))
+ if platform.is_msvc():
+ gtest_cflags = '/nologo /EHsc /Zi /D_VARIADIC_MAX=10 ' + gtest_all_incs
+ else:
+ gtest_cflags = '-fvisibility=hidden ' + gtest_all_incs
+ objs += n.build(built('gtest-all' + objext), 'cxx',
+ os.path.join(path, 'src', 'gtest-all.cc'),
+ variables=[('cflags', gtest_cflags)])
+
+ test_cflags.append('-I%s' % os.path.join(path, 'include'))
+else:
+ # Use gtest from system.
+ if platform.is_msvc():
+ test_libs.extend(['gtest_main.lib', 'gtest.lib'])
+ else:
+ test_libs.extend(['-lgtest_main', '-lgtest'])
+
+n.variable('test_cflags', test_cflags)
+for name in ['build_log_test',
+ 'build_test',
+ 'clean_test',
+ 'depfile_parser_test',
+ 'deps_log_test',
+ 'disk_interface_test',
+ 'edit_distance_test',
+ 'graph_test',
+ 'lexer_test',
+ 'manifest_parser_test',
+ 'ninja_test',
+ 'state_test',
+ 'subprocess_test',
+ 'test',
+ 'util_test']:
+ objs += cxx(name, variables=[('cflags', '$test_cflags')])
+if platform.is_windows():
+ for name in ['includes_normalize_test', 'msvc_helper_test']:
+ objs += cxx(name, variables=[('cflags', test_cflags)])
+
+if not platform.is_windows():
+ test_libs.append('-lpthread')
+ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
+ variables=[('ldflags', test_ldflags),
+ ('libs', test_libs)])
+n.newline()
+all_targets += ninja_test
+
+
+n.comment('Ancillary executables.')
+objs = cxx('parser_perftest')
+all_targets += n.build(binary('parser_perftest'), 'link', objs,
+ implicit=ninja_lib, variables=[('libs', libs)])
+objs = cxx('build_log_perftest')
+all_targets += n.build(binary('build_log_perftest'), 'link', objs,
+ implicit=ninja_lib, variables=[('libs', libs)])
+objs = cxx('canon_perftest')
+all_targets += n.build(binary('canon_perftest'), 'link', objs,
+ implicit=ninja_lib, variables=[('libs', libs)])
+objs = cxx('hash_collision_bench')
+all_targets += n.build(binary('hash_collision_bench'), 'link', objs,
+ implicit=ninja_lib, variables=[('libs', libs)])
+n.newline()
+
+n.comment('Generate a graph using the "graph" tool.')
+n.rule('gendot',
+ command='./ninja -t graph all > $out')
+n.rule('gengraph',
+ command='dot -Tpng $in > $out')
+dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja'])
+n.build('graph.png', 'gengraph', dot)
+n.newline()
+
+n.comment('Generate the manual using asciidoc.')
+n.rule('asciidoc',
+ command='asciidoc -b docbook -d book -o $out $in',
+ description='ASCIIDOC $out')
+n.rule('xsltproc',
+ command='xsltproc --nonet doc/docbook.xsl $in > $out',
+ description='XSLTPROC $out')
+xml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc'))
+manual = n.build(doc('manual.html'), 'xsltproc', xml,
+ implicit=doc('style.css'))
+n.build('manual', 'phony',
+ order_only=manual)
+n.newline()
+
+n.comment('Generate Doxygen.')
+n.rule('doxygen',
+ command='doxygen $in',
+ description='DOXYGEN $in')
+n.variable('doxygen_mainpage_generator',
+ src('gen_doxygen_mainpage.sh'))
+n.rule('doxygen_mainpage',
+ command='$doxygen_mainpage_generator $in > $out',
+ description='DOXYGEN_MAINPAGE $out')
+mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
+ ['README', 'COPYING'],
+ implicit=['$doxygen_mainpage_generator'])
+n.build('doxygen', 'doxygen', doc('doxygen.config'),
+ implicit=mainpage)
+n.newline()
+
+if not host.is_mingw():
+ n.comment('Regenerate build files if build script changes.')
+ n.rule('configure',
+ command='${configure_env}%s configure.py $configure_args' %
+ options.with_python,
+ generator=True)
+ n.build('build.ninja', 'configure',
+ implicit=['configure.py', os.path.normpath('misc/ninja_syntax.py')])
+ n.newline()
+
+n.default(ninja)
+n.newline()
+
+if host.is_linux():
+ n.comment('Packaging')
+ n.rule('rpmbuild',
+ command="misc/packaging/rpmbuild.sh",
+ description='Building rpms..')
+ n.build('rpm', 'rpmbuild')
+ n.newline()
+
+n.build('all', 'phony', all_targets)
+
+print('wrote %s.' % BUILD_FILENAME)
diff --git a/ninja/doc/docbook.xsl b/ninja/doc/docbook.xsl
new file mode 100644
index 00000000000..8afdc8c5922
--- /dev/null
+++ b/ninja/doc/docbook.xsl
@@ -0,0 +1,17 @@
+<!-- This soup of XML is the minimum customization necessary to make the
+ autogenerated manual look ok. -->
+<!DOCTYPE xsl:stylesheet [
+<!ENTITY css SYSTEM "style.css">
+]>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version='1.0'>
+ <xsl:import href="http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl"/>
+ <xsl:template name="user.head.content"><style>&css;</style></xsl:template>
+ <xsl:template name="body.attributes"></xsl:template>
+ <xsl:param name="generate.toc" select="'book toc'"/>
+ <xsl:param name="chapter.autolabel" select="0" />
+ <xsl:param name="toc.list.type">ul</xsl:param>
+
+ <xsl:output method="html" encoding="utf-8" indent="no"
+ doctype-public=""/>
+</xsl:stylesheet>
diff --git a/ninja/doc/doxygen.config b/ninja/doc/doxygen.config
new file mode 100644
index 00000000000..d933021e2ba
--- /dev/null
+++ b/ninja/doc/doxygen.config
@@ -0,0 +1,1250 @@
+# Doxyfile 1.4.5
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = "Ninja"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+# PROJECT_NUMBER = "0"
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = "doc/doxygen/"
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish,
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese,
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# This tag can be used to specify the encoding used in the generated output.
+# The encoding is not always determined by the language that is chosen,
+# but also whether or not the output is meant for Windows or non-Windows users.
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES
+# forces the Windows encoding (this is the default for the Windows binary),
+# whereas setting the tag to NO uses a Unix-style encoding (the default for
+# all platforms other than Windows).
+
+# Obsolete option.
+#USE_WINDOWS_ENCODING = YES
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH = src
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH = src/
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+# Has become obsolete.
+#DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 2
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for Java.
+# For instance, namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to
+# include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+# BUILTIN_STL_SUPPORT = NO
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = YES
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is YES.
+
+SHOW_DIRECTORIES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text "
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = src \
+ build/doxygen_mainpage
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS = *.cc \
+ *.h
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH = src
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS = *.cpp \
+ *.cc \
+ *.h \
+ *.hh \
+ INSTALL DEPENDENCIES CHANGELOG LICENSE LGPL
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = YES
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH = src
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 2
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+HTML_HEADER =
+
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME =
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME =
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = YES
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE = doc/doxygen/html/Ninja.TAGFILE
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = YES
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = NO
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+# UML_LOOK = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+# Obsolete option.
+#MAX_DOT_GRAPH_WIDTH = 1280
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+# Obsolete option.
+#MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+# JW
+# DOT_MULTI_TARGETS = NO
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+# JW SEARCHENGINE = NO
+SEARCHENGINE = YES
diff --git a/ninja/doc/manual.asciidoc b/ninja/doc/manual.asciidoc
new file mode 100644
index 00000000000..aa5644d6451
--- /dev/null
+++ b/ninja/doc/manual.asciidoc
@@ -0,0 +1,916 @@
+Ninja
+=====
+Evan Martin <martine@danga.com>
+
+
+Introduction
+------------
+
+Ninja is yet another build system. It takes as input the
+interdependencies of files (typically source code and output
+executables) and orchestrates building them, _quickly_.
+
+Ninja joins a sea of other build systems. Its distinguishing goal is
+to be fast. It is born from
+http://neugierig.org/software/chromium/notes/2011/02/ninja.html[my
+work on the Chromium browser project], which has over 30,000 source
+files and whose other build systems (including one built from custom
+non-recursive Makefiles) would take ten seconds to start building
+after changing one file. Ninja is under a second.
+
+Philosophical overview
+~~~~~~~~~~~~~~~~~~~~~~
+
+Where other build systems are high-level languages, Ninja aims to be
+an assembler.
+
+Build systems get slow when they need to make decisions. When you are
+in an edit-compile cycle you want it to be as fast as possible -- you
+want the build system to do the minimum work necessary to figure out
+what needs to be built immediately.
+
+Ninja contains the barest functionality necessary to describe
+arbitrary dependency graphs. Its lack of syntax makes it impossible
+to express complex decisions.
+
+Instead, Ninja is intended to be used with a separate program
+generating its input files. The generator program (like the
+`./configure` found in autotools projects) can analyze system
+dependencies and make as many decisions as possible up front so that
+incremental builds stay fast. Going beyond autotools, even build-time
+decisions like "which compiler flags should I use?" or "should I
+build a debug or release-mode binary?" belong in the `.ninja` file
+generator.
+
+Design goals
+~~~~~~~~~~~~
+
+Here are the design goals of Ninja:
+
+* very fast (i.e., instant) incremental builds, even for very large
+ projects.
+
+* very little policy about how code is built. Different projects and
+ higher-level build systems have different opinions about how code
+ should be built; for example, should built objects live alongside
+ the sources or should all build output go into a separate directory?
+  Is there a "package" rule that builds a distributable package of
+ the project? Sidestep these decisions by trying to allow either to
+ be implemented, rather than choosing, even if that results in
+ more verbosity.
+
+* get dependencies correct, and in particular situations that are
+ difficult to get right with Makefiles (e.g. outputs need an implicit
+ dependency on the command line used to generate them; to build C
+ source code you need to use gcc's `-M` flags for header
+ dependencies).
+
+* when convenience and speed are in conflict, prefer speed.
+
+Some explicit _non-goals_:
+
+* convenient syntax for writing build files by hand. _You should
+ generate your ninja files using another program_. This is how we
+ can sidestep many policy decisions.
+
+* built-in rules. _Out of the box, Ninja has no rules for
+ e.g. compiling C code._
+
+* build-time customization of the build. _Options belong in
+ the program that generates the ninja files_.
+
+* build-time decision-making ability such as conditionals or search
+ paths. _Making decisions is slow._
+
+To restate, Ninja is faster than other build systems because it is
+painfully simple. You must tell Ninja exactly what to do when you
+create your project's `.ninja` files.
+
+Comparison to Make
+~~~~~~~~~~~~~~~~~~
+
+Ninja is closest in spirit and functionality to Make, relying on
+simple dependencies between file timestamps.
+
+But fundamentally, make has a lot of _features_: suffix rules,
+functions, built-in rules that e.g. search for RCS files when building
+source. Make's language was designed to be written by humans. Many
+projects find make alone adequate for their build problems.
+
+In contrast, Ninja has almost no features; just those necessary to get
+builds correct while punting most complexity to generation of the
+ninja input files. Ninja by itself is unlikely to be useful for most
+projects.
+
+Here are some of the features Ninja adds to Make. (These sorts of
+features can often be implemented using more complicated Makefiles,
+but they are not part of make itself.)
+
+* Ninja has special support for discovering extra dependencies at build
+ time, making it easy to get <<ref_headers,header dependencies>>
+ correct for C/C++ code.
+
+* A build edge may have multiple outputs.
+
+* Outputs implicitly depend on the command line that was used to generate
+ them, which means that changing e.g. compilation flags will cause
+ the outputs to rebuild.
+
+* Output directories are always implicitly created before running the
+ command that relies on them.
+
+* Rules can provide shorter descriptions of the command being run, so
+ you can print e.g. `CC foo.o` instead of a long command line while
+ building.
+
+* Builds are always run in parallel, based by default on the number of
+ CPUs your system has. Underspecified build dependencies will result
+ in incorrect builds.
+
+* Command output is always buffered. This means commands running in
+ parallel don't interleave their output, and when a command fails we
+ can print its failure output next to the full command line that
+ produced the failure.
+
+
+Using Ninja for your project
+----------------------------
+
+Ninja currently works on Unix-like systems and Windows. It's seen the
+most testing on Linux (and has the best performance there) but it runs
+fine on Mac OS X and FreeBSD.
+
+If your project is small, Ninja's speed impact is likely unnoticeable.
+(However, even for small projects it sometimes turns out that Ninja's
+limited syntax forces simpler build rules that result in faster
+builds.) Another way to say this is that if you're happy with the
+edit-compile cycle time of your project already then Ninja won't help.
+
+There are many other build systems that are more user-friendly or
+featureful than Ninja itself. For some recommendations: the Ninja
+author found http://gittup.org/tup/[the tup build system] influential
+in Ninja's design, and thinks https://github.com/apenwarr/redo[redo]'s
+design is quite clever.
+
+Ninja's benefit comes from using it in conjunction with a smarter
+meta-build system.
+
+http://code.google.com/p/gyp/[gyp]:: The meta-build system used to
+generate build files for Google Chrome and related projects (v8,
+node.js). gyp can generate Ninja files for all platforms supported by
+Chrome. See the
+http://code.google.com/p/chromium/wiki/NinjaBuild[Chromium Ninja
+documentation for more details].
+
+http://www.cmake.org/[CMake]:: A widely used meta-build system that
+can generate Ninja files on Linux as of CMake version 2.8.8. (There
+is some Mac and Windows support -- http://www.reactos.org[ReactOS]
+uses Ninja on Windows for their buildbots, but those platforms are not
+yet officially supported by CMake as the full test suite doesn't
+pass.)
+
+others:: Ninja ought to fit perfectly into other meta-build software
+like http://industriousone.com/premake[premake]. If you do this work,
+please let us know!
+
+Running Ninja
+~~~~~~~~~~~~~
+
+Run `ninja`. By default, it looks for a file named `build.ninja` in
+the current directory and builds all out-of-date targets. You can
+specify which targets (files) to build as command line arguments.
+
+`ninja -h` prints help output. Many of Ninja's flags intentionally
+match those of Make; e.g. `ninja -C build -j 20` changes into the
+`build` directory and runs 20 build commands in parallel. (Note that
+Ninja defaults to running commands in parallel anyway, so typically
+you don't need to pass `-j`.)
+
+
+Environment variables
+~~~~~~~~~~~~~~~~~~~~~
+
+Ninja supports one environment variable to control its behavior:
+`NINJA_STATUS`, the progress status printed before the rule being run.
+
+Several placeholders are available:
+
+`%s`:: The number of started edges.
+`%t`:: The total number of edges that must be run to complete the build.
+`%p`:: The percentage of started edges.
+`%r`:: The number of currently running edges.
+`%u`:: The number of remaining edges to start.
+`%f`:: The number of finished edges.
+`%o`:: Overall rate of finished edges per second
+`%c`:: Current rate of finished edges per second (average over builds
+specified by `-j` or its default)
+`%e`:: Elapsed time in seconds. _(Available since Ninja 1.2.)_
+`%%`:: A plain `%` character.
+
+The default progress status is `"[%s/%t] "` (note the trailing space
+to separate from the build rule). Another example of possible progress status
+could be `"[%u/%r/%f] "`.
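+
+For example, from a POSIX shell the status format can be set for a
+single invocation like this (the format string here is only an
+illustration):
+
+----
+NINJA_STATUS="[%f/%t] " ninja
+----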
+
+Extra tools
+~~~~~~~~~~~
+
+The `-t` flag on the Ninja command line runs some tools that we have
+found useful during Ninja's development. The current tools are:
+
+[horizontal]
+`query`:: dump the inputs and outputs of a given target.
+
+`browse`:: browse the dependency graph in a web browser. Clicking a
+file focuses the view on that file, showing inputs and outputs. This
+feature requires a Python installation.
+
+`graph`:: output a file in the syntax used by `graphviz`, an automatic
+graph layout tool. Use it like:
++
+----
+ninja -t graph mytarget | dot -Tpng -ograph.png
+----
++
+In the Ninja source tree, `ninja graph.png`
+generates an image for Ninja itself. If no target is given, a graph
+for all root targets is generated.
+
+`targets`:: output a list of targets either by rule or by depth. If used
+like +ninja -t targets rule _name_+ it prints the list of targets
+built using the given rule. If no rule is given, it prints the source
+files (the leaves of the graph). If used like
++ninja -t targets depth _digit_+ it
+prints the list of targets in a depth-first manner starting from the root
+targets (the ones with no outputs). Indentation is used to mark dependencies.
+If the depth is zero it prints all targets. If no arguments are provided
++ninja -t targets depth 1+ is assumed. In this mode targets may be listed
+several times. If used like +ninja -t targets all+ it
+prints all the targets available without indentation and it is faster
+than the _depth_ mode.
+
+`commands`:: given a list of targets, print a list of commands which, if
+executed in order, may be used to rebuild those targets, assuming that all
+output files are out of date.
+
+`clean`:: remove built files. By default it removes all built files
+except for those created by the generator. Adding the `-g` flag also
+removes built files created by the generator (see <<ref_rule,the rule
+reference for the +generator+ attribute>>). Additional arguments are
+targets, which removes the given targets and recursively all files
+built for them.
++
+If used like +ninja -t clean -r _rules_+ it removes all files built using
+the given rules.
++
+Files created but not referenced in the graph are not removed. This
+tool takes into account the +-v+ and the +-n+ options (note that +-n+
+implies +-v+).
+
+`compdb`:: given a list of rules, each of which is expected to be a
+C family language compiler rule whose first input is the name of the
+source file, prints on standard output a compilation database in the
+http://clang.llvm.org/docs/JSONCompilationDatabase.html[JSON format] expected
+by the Clang tooling interface.
+_Available since Ninja 1.2._
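+
+For example, assuming the project's compiler rule is named `cc` (an
+assumption; substitute your own rule names), the database can be
+written to the file name conventionally expected by Clang tools:
+
+----
+ninja -t compdb cc > compile_commands.json
+----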
+
+
+Writing your own Ninja files
+----------------------------
+
+The remainder of this manual is only useful if you are constructing
+Ninja files yourself: for example, if you're writing a meta-build
+system or supporting a new language.
+
+Conceptual overview
+~~~~~~~~~~~~~~~~~~~
+
+Ninja evaluates a graph of dependencies between files, and runs
+whichever commands are necessary to make your build target up to date
+as determined by file modification times. If you are familiar with
+Make, Ninja is very similar.
+
+A build file (default name: `build.ninja`) provides a list of _rules_
+-- short names for longer commands, like how to run the compiler --
+along with a list of _build_ statements saying how to build files
+using the rules -- which rule to apply to which inputs to produce
+which outputs.
+
+Conceptually, `build` statements describe the dependency graph of your
+project, while `rule` statements describe how to generate the files
+along a given edge of the graph.
+
+Syntax example
+~~~~~~~~~~~~~~
+
+Here's a basic `.ninja` file that demonstrates most of the syntax.
+It will be used as an example for the following sections.
+
+---------------------------------
+cflags = -Wall
+
+rule cc
+ command = gcc $cflags -c $in -o $out
+
+build foo.o: cc foo.c
+---------------------------------
+
+Variables
+~~~~~~~~~
+Despite the non-goal of being convenient to write by hand, to keep
+build files readable (debuggable), Ninja supports declaring shorter
+reusable names for strings. A declaration like the following
+
+----------------
+cflags = -g
+----------------
+
+can be used on the right side of an equals sign, dereferencing it with
+a dollar sign, like this:
+
+----------------
+rule cc
+ command = gcc $cflags -c $in -o $out
+----------------
+
+Variables can also be referenced using curly braces like `${in}`.
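+For example, the `cc` rule above could equivalently be written with
+braces:
+
+----
+rule cc
+  command = gcc ${cflags} -c ${in} -o ${out}
+----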
+
+Variables might better be called "bindings", in that a given variable
+cannot be changed, only shadowed. There is more on how shadowing works
+later in this document.
+
+Rules
+~~~~~
+
+Rules declare a short name for a command line. They begin with a line
+consisting of the `rule` keyword and a name for the rule. Then
+follows an indented set of `variable = value` lines.
+
+The basic example above declares a new rule named `cc`, along with the
+command to run. In the context of a rule, the `command` variable
+defines the command to run, `$in` expands to the list of
+input files (`foo.c`), and `$out` to the output files (`foo.o`) for the
+command. A full list of special variables is provided in
+<<ref_rule,the reference>>.
+
+Build statements
+~~~~~~~~~~~~~~~~
+
+Build statements declare a relationship between input and output
+files. They begin with the `build` keyword, and have the format
++build _outputs_: _rulename_ _inputs_+. Such a declaration says that
+all of the output files are derived from the input files. When the
+output files are missing or when the inputs change, Ninja will run the
+rule to regenerate the outputs.
+
+The basic example above describes how to build `foo.o`, using the `cc`
+rule.
+
+In the scope of a `build` block (including in the evaluation of its
+associated `rule`), the variable `$in` is the list of inputs and the
+variable `$out` is the list of outputs.
+
+A build statement may be followed by an indented set of `key = value`
+pairs, much like a rule. These bindings shadow any outer variables of
+the same name when the command for that build is evaluated. For example:
+
+----------------
+cflags = -Wall -Werror
+rule cc
+ command = gcc $cflags -c $in -o $out
+
+# If left unspecified, builds get the outer $cflags.
+build foo.o: cc foo.c
+
+# But you can shadow variables like cflags for a particular build.
+build special.o: cc special.c
+ cflags = -Wall
+
+# The variable was only shadowed for the scope of special.o;
+# Subsequent build lines get the outer (original) cflags.
+build bar.o: cc bar.c
+
+----------------
+
+For more discussion of how scoping works, consult <<ref_scope,the
+reference>>.
+
+If you need more complicated information passed from the build
+statement to the rule (for example, if the rule needs "the file
+extension of the first input"), pass that through as an extra
+variable, like how `cflags` is passed above.
+
+If the top-level Ninja file is specified as an output of any build
+statement and it is out of date, Ninja will rebuild and reload it
+before building the targets requested by the user.
+
+Generating Ninja files from code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`misc/ninja_syntax.py` in the Ninja distribution is a tiny Python
+module to facilitate generating Ninja files. It allows you to make
+Python calls like `ninja.rule(name='foo', command='bar',
+depfile='$out.d')` and it will generate the appropriate syntax. Feel
+free to just inline it into your project's build system if it's
+useful.
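+
+As a rough sketch, the `ninja.rule(...)` call shown above corresponds
+to emitting ninja text along these lines (exact formatting may differ
+slightly):
+
+----
+rule foo
+  command = bar
+  depfile = $out.d
+----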
+
+
+More details
+------------
+
+The `phony` rule
+~~~~~~~~~~~~~~~~
+
+The special rule name `phony` can be used to create aliases for other
+targets. For example:
+
+----------------
+build foo: phony some/file/in/a/faraway/subdir/foo
+----------------
+
+This makes `ninja foo` build the longer path. Semantically, the
+`phony` rule is equivalent to a plain rule where the `command` does
+nothing, but phony rules are handled specially in that they aren't
+printed when run or written to the log (see below), and they don't
+contribute to the command count printed as part of the build process.
+
+`phony` can also be used to create dummy targets for files which
+may not exist at build time. If a phony build statement is written
+without any dependencies, the target will be considered out of date if
+it does not exist. Without a phony build statement, Ninja will report
+an error if the file does not exist and is required by the build.
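+
+A minimal sketch of that pattern, using a hypothetical generated
+header as the possibly-missing file:
+
+----
+# gen/config.h may be listed as an input elsewhere but might not
+# exist yet; declaring it phony keeps its absence from being an error.
+build gen/config.h: phony
+----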
+
+
+Default target statements
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, if no targets are specified on the command line, Ninja
+will build every output that is not named as an input elsewhere.
+You can override this behavior using a default target statement.
+A default target statement causes Ninja to build only a given subset
+of output files if none are specified on the command line.
+
+Default target statements begin with the `default` keyword, and have
+the format +default _targets_+. A default target statement must appear
+after the build statement that declares the target as an output file.
+They are cumulative, so multiple statements may be used to extend
+the list of default targets. For example:
+
+----------------
+default foo bar
+default baz
+----------------
+
+This causes Ninja to build the `foo`, `bar` and `baz` targets by
+default.
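+
+A slightly fuller sketch showing the required ordering relative to the
+build statements (rule and file names here are only illustrative):
+
+----
+rule cc
+  command = gcc -c $in -o $out
+
+build foo.o: cc foo.c
+build bar.o: cc bar.c
+
+# Running plain `ninja` now builds only foo.o.
+default foo.o
+----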
+
+
+[[ref_log]]
+The Ninja log
+~~~~~~~~~~~~~
+
+For each built file, Ninja keeps a log of the command used to build
+it. Using this log Ninja can know when an existing output was built
+with a different command line than the build files specify (i.e., the
+command line changed) and knows to rebuild the file.
+
+The log file is kept in the build root in a file called `.ninja_log`.
+If you provide a variable named `builddir` in the outermost scope,
+`.ninja_log` will be kept in that directory instead.
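+
+For instance, a sketch of redirecting the log with a top-level binding
+(the directory name is arbitrary):
+
+----
+builddir = out
+# The log is then written to out/.ninja_log instead of ./.ninja_log.
+----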
+
+
+[[ref_versioning]]
+Version compatibility
+~~~~~~~~~~~~~~~~~~~~~
+
+_Available since Ninja 1.2._
+
+Ninja version labels follow the standard major.minor.patch format,
+where the major version is increased on backwards-incompatible
+syntax/behavioral changes and the minor version is increased on new
+behaviors. Your `build.ninja` may declare a variable named
+`ninja_required_version` that asserts the minimum Ninja version
+required to use the generated file. For example,
+
+-----
+ninja_required_version = 1.1
+-----
+
+declares that the build file relies on some feature that was
+introduced in Ninja 1.1 (perhaps the `pool` syntax), and that
+Ninja 1.1 or greater must be used to build. Unlike other Ninja
+variables, this version requirement is checked immediately when
+the variable is encountered in parsing, so it's best to put it
+at the top of the build file.
+
+Ninja always warns if the major versions of Ninja and the
+`ninja_required_version` don't match; a major version change hasn't
+come up yet so it's difficult to predict what behavior might be
+required.
+
+[[ref_headers]]
+C/C++ header dependencies
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To get C/C++ header dependencies (or any other build dependency that
+works in a similar way) correct, Ninja has some extra functionality.
+
+The problem with headers is that the full list of files that a given
+source file depends on can only be discovered by the compiler:
+different preprocessor defines and include paths cause different files
+to be used. Some compilers can emit this information while building,
+and Ninja can use that to get its dependencies perfect.
+
+Consider: if the file has never been compiled, it must be built anyway,
+generating the header dependencies as a side effect. If any file is
+later modified (even in a way that changes which headers it depends
+on) the modification will cause a rebuild as well, keeping the
+dependencies up to date.
+
+When loading these special dependencies, Ninja implicitly adds extra
+build edges such that it is not an error if the listed dependency is
+missing. This allows you to delete a header file and rebuild without
+the build aborting due to a missing input.
+
+depfile
+^^^^^^^
+
+`gcc` (and other compilers like `clang`) support emitting dependency
+information in the syntax of a Makefile. (Any command that can write
+dependencies in this form can be used, not just `gcc`.)
+
+Bringing this information into Ninja requires cooperation. On the
+Ninja side, the `depfile` attribute on the `build` statement must
+point to the path where this data is written. (Ninja only supports the
+limited subset of the Makefile syntax emitted by compilers.) The
+command must then write its dependencies to the `depfile` path, as in
+the following example:
+
+----
+rule cc
+ depfile = $out.d
+ command = gcc -MMD -MF $out.d [other gcc flags here]
+----
+
+The `-MMD` flag to `gcc` tells it to output header dependencies, and
+the `-MF` flag tells it where to write them.
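+
+Putting it together, a fuller sketch (the file names and the extra
+flags are illustrative):
+
+----
+rule cc
+  depfile = $out.d
+  command = gcc -MMD -MF $out.d -c $in -o $out
+
+build foo.o: cc foo.c
+----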
+
+deps
+^^^^
+
+_(Available since Ninja 1.3.)_
+
+For large projects (and particularly on Windows, where the file
+system is slow), loading these dependency files at startup is itself
+slow.
+
+Ninja 1.3 can instead process dependencies just after they're generated
+and save a compacted form of the same information in a Ninja-internal
+database.
+
+Ninja supports this processing in two forms.
+
+1. `deps = gcc` specifies that the tool outputs `gcc`-style dependencies
+   in the form of Makefiles. Adding this to the above example will
+   cause Ninja to process the `depfile` immediately after the
+   compilation finishes, then delete the `.d` file (which is only used
+   as a temporary). (See the sketch after this list.)
+
+2. `deps = msvc` specifies that the tool outputs header dependencies
+ in the form produced by Visual Studio's compiler's
+ http://msdn.microsoft.com/en-us/library/hdkef6tk(v=vs.90).aspx[`/showIncludes`
+ flag]. Briefly, this means the tool outputs specially-formatted lines
+ to its stdout. Ninja then filters these lines from the displayed
+ output. No `depfile` attribute is necessary.
++
+----
+rule cc
+ deps = msvc
+ command = cl /showIncludes -c $in /Fo$out
+----
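+
+For the `gcc` form in item 1, a minimal sketch that extends the earlier
+`cc` rule (flags are illustrative):
+
+----
+rule cc
+  deps = gcc
+  depfile = $out.d
+  command = gcc -MMD -MF $out.d -c $in -o $out
+----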
+
+Pools
+~~~~~
+
+_Available since Ninja 1.1._
+
+Pools allow you to restrict one or more rules or build edges to a
+finite number of concurrent jobs, more tightly than the default
+parallelism.
+
+This can be useful, for example, to restrict a particular expensive rule
+(like link steps for huge executables), or to restrict particular build
+statements which you know perform poorly when run concurrently.
+
+Each pool has a `depth` variable which is specified in the build file.
+The pool is then referred to with the `pool` variable on either a rule
+or a build statement.
+
+No matter what pools you specify, ninja will never run more concurrent jobs
+than the default parallelism, or the number of jobs specified on the command
+line (with `-j`).
+
+----------------
+# No more than 4 links at a time.
+pool link_pool
+ depth = 4
+
+# No more than 1 heavy object at a time.
+pool heavy_object_pool
+ depth = 1
+
+rule link
+ ...
+ pool = link_pool
+
+rule cc
+ ...
+
+# The link_pool is used here. Only 4 links will run concurrently.
+build foo.exe: link input.obj
+
+# A build statement can be exempted from its rule's pool by setting an
+# empty pool. This effectively puts the build statement back into the default
+# pool, which has infinite depth.
+build other.exe: link input.obj
+ pool =
+
+# A build statement can specify a pool directly.
+# Only one of these builds will run at a time.
+build heavy_object1.obj: cc heavy_obj1.cc
+ pool = heavy_object_pool
+build heavy_object2.obj: cc heavy_obj2.cc
+ pool = heavy_object_pool
+
+----------------
+
+
+Ninja file reference
+--------------------
+
+A file is a series of declarations. A declaration can be one of:
+
+1. A rule declaration, which begins with +rule _rulename_+, and
+ then has a series of indented lines defining variables.
+
+2. A build edge, which looks like +build _output1_ _output2_:
+ _rulename_ _input1_ _input2_+. +
+ Implicit dependencies may be tacked on the end with +|
+ _dependency1_ _dependency2_+. +
+ Order-only dependencies may be tacked on the end with +||
+ _dependency1_ _dependency2_+. (See <<ref_dependencies,the reference on
+ dependency types>>.)
+
+3. Variable declarations, which look like +_variable_ = _value_+.
+
+4. Default target statements, which look like +default _target1_ _target2_+.
+
+5. References to more files, which look like +subninja _path_+ or
+ +include _path_+. The difference between these is explained below
+ <<ref_scope,in the discussion about scoping>>.
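+
+A minimal, hypothetical sketch combining these declaration forms:
+
+----
+# 3. A variable declaration.
+cflags = -Wall
+
+# 1. A rule declaration.
+rule cc
+  command = gcc $cflags -c $in -o $out
+
+# 2. A build edge, with an implicit dependency (|) on a config header.
+build foo.o: cc foo.c | config.h
+
+# 4. A default target statement.
+default foo.o
+
+# 5. A reference to another file.
+subninja subdir/build.ninja
+----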
+
+Lexical syntax
+~~~~~~~~~~~~~~
+
+Ninja is mostly encoding agnostic, as long as the bytes Ninja cares
+about (like slashes in paths) are ASCII. This means e.g. UTF-8 or
+ISO-8859-1 input files ought to work. (To simplify some code, tabs
+and carriage returns are currently disallowed; this could be fixed if
+it really mattered to you.)
+
+Comments begin with `#` and extend to the end of the line.
+
+Newlines are significant. Statements like `build foo bar` are a set
+of space-separated tokens that end at the newline. Newlines and
+spaces within a token must be escaped.
+
+There is only one escape character, `$`, and it has the following
+behaviors:
+
+[horizontal]
+`$` followed by a newline:: escape the newline (continue the current line
+across a line break).
+
+`$` followed by text:: a variable reference.
+
+`${varname}`:: alternate syntax for `$varname`.
+
+`$` followed by space:: a space. (This is only necessary in lists of
+paths, where a space would otherwise separate filenames. See below.)
+
+`$:` :: a colon. (This is only necessary in `build` lines, where a colon
+would otherwise terminate the list of outputs.)
+
+`$$`:: a literal `$`.
+
+A `build` or `default` statement is first parsed as a space-separated
+list of filenames and then each name is expanded. This means that
+spaces within a variable will result in spaces in the expanded
+filename.
+
+----
+spaced = foo bar
+build $spaced/baz other$ file: ...
+# The above build line has two outputs: "foo bar/baz" and "other file".
+----
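+
+For completeness, a small sketch of the `$$` and `$:` escapes (names
+are illustrative):
+
+----
+# In the command below, "$$" becomes a literal "$".
+rule price
+  command = echo 'price: $$5' > $out
+
+# The "$:" keeps the colon from ending the list of outputs.
+build totals$:2013.txt: price
+----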
+
+In a `name = value` statement, whitespace at the beginning of a value
+is always stripped. Whitespace at the beginning of a line after a
+line continuation is also stripped.
+
+----
+two_words_with_one_space = foo $
+ bar
+one_word_with_no_space = foo$
+ bar
+----
+
+Other whitespace is only significant if it's at the beginning of a
+line. If a line is indented more than the previous one, it's
+considered part of its parent's scope; if it is indented less than the
+previous one, it closes the previous scope.
+
+[[ref_toplevel]]
+Top-level variables
+~~~~~~~~~~~~~~~~~~~
+
+Two variables are significant when declared in the outermost file scope.
+
+`builddir`:: a directory for some Ninja output files. See <<ref_log,the
+ discussion of the build log>>. (You can also store other build output
+ in this directory.)
+
+`ninja_required_version`:: the minimum version of Ninja required to process
+ the build correctly. See <<ref_versioning,the discussion of versioning>>.
+
+
+[[ref_rule]]
+Rule variables
+~~~~~~~~~~~~~~
+
+A `rule` block contains a list of `key = value` declarations that
+affect the processing of the rule. Here is a full list of special
+keys.
+
+`command` (_required_):: the command line to run. This string (after
+ $variables are expanded) is passed directly to `sh -c` without
+ interpretation by Ninja. Each `rule` may have only one `command`
+  declaration. To specify multiple commands, use `&&` (or similar) to
+ concatenate operations.
+
+`depfile`:: path to an optional `Makefile` that contains extra
+ _implicit dependencies_ (see <<ref_dependencies,the reference on
+ dependency types>>). This is explicitly to support C/C++ header
+ dependencies; see <<ref_headers,the full discussion>>.
+
+`deps`:: _(Available since Ninja 1.3.)_ if present, must be one of
+ `gcc` or `msvc` to specify special dependency processing. See
+ <<ref_headers,the full discussion>>. The generated database is
+ stored as `.ninja_deps` in the `builddir`, see <<ref_toplevel,the
+ discussion of `builddir`>>.
+
+`description`:: a short description of the command, used to pretty-print
+ the command as it's running. The `-v` flag controls whether to print
+ the full command or its description; if a command fails, the full command
+ line will always be printed before the command's output.
+
+`generator`:: if present, specifies that this rule is used to
+ re-invoke the generator program. Files built using `generator`
+ rules are treated specially in two ways: firstly, they will not be
+ rebuilt if the command line changes; and secondly, they are not
+ cleaned by default.
+
+`in`:: the shell-quoted space-separated list of files provided as
+ inputs to the build line referencing this `rule`. (`$in` is provided
+ solely for convenience; if you need some subset or variant of this
+ list of files, just construct a new variable with that list and use
+ that instead.)
+
+`in_newline`:: the same as `$in` except that multiple inputs are
+ separated by newlines rather than spaces. (For use with
+ `$rspfile_content`; this works around a bug in the MSVC linker where
+ it uses a fixed-size buffer for processing input.)
+
+`out`:: the shell-quoted space-separated list of files provided as
+ outputs to the build line referencing this `rule`.
+
+`restat`:: if present, causes Ninja to re-stat the command's outputs
+ after execution of the command. Each output whose modification time
+ the command did not change will be treated as though it had never
+ needed to be built. This may cause the output's reverse
+ dependencies to be removed from the list of pending build actions.
+
+`rspfile`, `rspfile_content`:: if present (both), Ninja will use a
+ response file for the given command, i.e. write the selected string
+ (`rspfile_content`) to the given file (`rspfile`) before calling the
+ command and delete the file after successful execution of the
+ command.
++
+This is particularly useful on Windows, where the maximum length of
+a command line is limited and response files must be used instead.
++
+Use it like in the following example:
++
+----
+rule link
+  command = link.exe /OUT:$out [usual link flags here] @$out.rsp
+ rspfile = $out.rsp
+ rspfile_content = $in
+
+build myapp.exe: link a.obj b.obj [possibly many other .obj files]
+----
+
+[[ref_dependencies]]
+Build dependencies
+~~~~~~~~~~~~~~~~~~
+
+There are three types of build dependencies which are subtly different.
+
+1. _Explicit dependencies_, as listed in a build line. These are
+ available as the `$in` variable in the rule. Changes in these files
+   cause the output to be rebuilt; if these files are missing and
+ Ninja doesn't know how to build them, the build is aborted.
++
+This is the standard form of dependency to be used for e.g. the
+source file of a compile command.
+
+2. _Implicit dependencies_, either as picked up from
+ a `depfile` attribute on a rule or from the syntax +| _dep1_
+ _dep2_+ on the end of a build line. The semantics are identical to
+   explicit dependencies; the only difference is that implicit dependencies
+ don't show up in the `$in` variable.
++
+This is for expressing dependencies that don't show up on the
+command line of the command; for example, for a rule that runs a
+script, the script itself should be an implicit dependency, as
+changes to the script should cause the output to rebuild.
++
+Note that dependencies as loaded through depfiles have slightly different
+semantics, as described in the <<ref_rule,rule reference>>.
+
+3. _Order-only dependencies_, expressed with the syntax +|| _dep1_
+ _dep2_+ on the end of a build line. When these are out of date, the
+ output is not rebuilt until they are built, but changes in order-only
+ dependencies alone do not cause the output to be rebuilt.
++
+Order-only dependencies can be useful for bootstrapping dependencies
+that are only discovered during build time: for example, to generate a
+header file before starting a subsequent compilation step. (Once the
+header is used in compilation, a generated dependency file will then
+express the implicit dependency.)
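+
+For instance, a sketch of the generated-header bootstrap described
+above (script and file names are illustrative):
+
+----
+rule gen_header
+  command = ./gen_version.sh > $out
+
+rule cc
+  depfile = $out.d
+  command = gcc -MMD -MF $out.d -c $in -o $out
+
+build gen/version.h: gen_header
+
+# Order-only (||): ensure the header exists before the first compile;
+# later rebuilds are driven by the depfile once foo.c includes it.
+build foo.o: cc foo.c || gen/version.h
+----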
+
+Variable expansion
+~~~~~~~~~~~~~~~~~~
+
+Variables are expanded in paths (in a `build` or `default` statement)
+and on the right side of a `name = value` statement.
+
+When a `name = value` statement is evaluated, its right-hand side is
+expanded immediately (according to the below scoping rules), and
+from then on `$name` expands to the static string as the result of the
+expansion. It is never the case that you'll need to "double-escape" a
+value to prevent it from getting expanded twice.
+
+All variables are expanded immediately as they're encountered in parsing,
+with one important exception: variables in `rule` blocks are expanded
+when the rule is _used_, not when it is declared. In the following
+example, the `demo` rule prints "this is a demo of bar".
+
+----
+rule demo
+ command = echo "this is a demo of $foo"
+
+build out: demo
+ foo = bar
+----
+
+[[ref_scope]]
+Evaluation and scoping
+~~~~~~~~~~~~~~~~~~~~~~
+
+Top-level variable declarations are scoped to the file they occur in.
+
+The `subninja` keyword, used to include another `.ninja` file,
+introduces a new scope. The included `subninja` file may use the
+variables from the parent file, and shadow their values for the file's
+scope, but it won't affect values of the variables in the parent.
+
+To include another `.ninja` file in the current scope, much like a C
+`#include` statement, use `include` instead of `subninja`.
+
+Variable declarations indented in a `build` block are scoped to the
+`build` block. The full lookup order for a variable expanded in a
+`build` block (or the `rule` it uses) is:
+
+1. Special built-in variables (`$in`, `$out`).
+
+2. Build-level variables from the `build` block.
+
+3. Rule-level variables from the `rule` block (i.e. `$command`).
+ (Note from the above discussion on expansion that these are
+ expanded "late", and may make use of in-scope bindings like `$in`.)
+
+4. File-level variables from the file that the `build` line was in.
+
+5. Variables from the file that included that file using the
+ `subninja` keyword.
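+
+A small sketch of `subninja` shadowing (paths are hypothetical, and a
+`cc` rule is assumed to be declared at the top level):
+
+----
+# build.ninja (top level): $cflags is -O2 here.
+cflags = -O2
+subninja sub/build.ninja
+build main.o: cc main.c
+
+# sub/build.ninja: shadows $cflags for this file only.
+cflags = -O0
+build sub/foo.o: cc sub/foo.c
+----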
+
diff --git a/ninja/doc/style.css b/ninja/doc/style.css
new file mode 100644
index 00000000000..5d14a1c7b7c
--- /dev/null
+++ b/ninja/doc/style.css
@@ -0,0 +1,35 @@
+body {
+ margin: 5ex 10ex;
+ max-width: 80ex;
+ line-height: 1.5;
+ font-family: sans-serif;
+}
+h1, h2, h3 {
+ font-weight: normal;
+}
+pre, code {
+ font-family: x, monospace;
+}
+pre {
+ padding: 1ex;
+ background: #eee;
+ border: solid 1px #ddd;
+ min-width: 0;
+ font-size: 90%;
+}
+code {
+ color: #007;
+}
+div.chapter {
+ margin-top: 4em;
+ border-top: solid 2px black;
+}
+.section .title {
+ font-size: 1.3em;
+}
+.section .section .title {
+ font-size: 1.2em;
+}
+p {
+ margin-top: 0;
+}
diff --git a/ninja/misc/bash-completion b/ninja/misc/bash-completion
new file mode 100644
index 00000000000..2d6975b9450
--- /dev/null
+++ b/ninja/misc/bash-completion
@@ -0,0 +1,40 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add the following to your .bashrc to tab-complete ninja targets
+# . path/to/ninja/misc/bash-completion
+
+_ninja_target() {
+ local cur targets dir line targets_command OPTIND
+ cur="${COMP_WORDS[COMP_CWORD]}"
+
+ if [[ "$cur" == "--"* ]]; then
+ # there is currently only one argument that takes --
+ COMPREPLY=($(compgen -P '--' -W 'version' -- "${cur:2}"))
+ else
+ dir="."
+ line=$(echo ${COMP_LINE} | cut -d" " -f 2-)
+    # filter out all non-relevant arguments but keep -C for dirs
+ while getopts C:f:j:l:k:nvd:t: opt "${line[@]}"; do
+ case $opt in
+ C) dir="$OPTARG" ;;
+ esac
+ done;
+ targets_command="ninja -C ${dir} -t targets all"
+ targets=$((${targets_command} 2>/dev/null) | awk -F: '{print $1}')
+ COMPREPLY=($(compgen -W "$targets" -- "$cur"))
+ fi
+ return
+}
+complete -F _ninja_target ninja
diff --git a/ninja/misc/inherited-fds.ninja b/ninja/misc/inherited-fds.ninja
new file mode 100644
index 00000000000..671155eb0b3
--- /dev/null
+++ b/ninja/misc/inherited-fds.ninja
@@ -0,0 +1,23 @@
+# This build file prints out a list of open file descriptors in
+# Ninja subprocesses, to help verify we don't accidentally leak
+# any.
+
+# Because one fd leak was in the code managing multiple subprocesses,
+# this test brings up multiple subprocesses and then dumps the fd
+# table of the last one.
+
+# Use like: ./ninja -f misc/inherited-fds.ninja
+
+rule sleep
+ command = sleep 10000
+
+rule dump
+ command = sleep 1; ls -l /proc/self/fd; exit 1
+
+build all: phony a b c d e
+
+build a: sleep
+build b: sleep
+build c: sleep
+build d: sleep
+build e: dump
diff --git a/ninja/misc/long-slow-build.ninja b/ninja/misc/long-slow-build.ninja
new file mode 100644
index 00000000000..46af6bafbe7
--- /dev/null
+++ b/ninja/misc/long-slow-build.ninja
@@ -0,0 +1,38 @@
+# An input file for running a "slow" build.
+# Use like: ninja -f misc/long-slow-build.ninja all
+
+rule sleep
+ command = sleep 1
+ description = SLEEP $out
+
+build 0: sleep README
+build 1: sleep README
+build 2: sleep README
+build 3: sleep README
+build 4: sleep README
+build 5: sleep README
+build 6: sleep README
+build 7: sleep README
+build 8: sleep README
+build 9: sleep README
+build 10: sleep 0
+build 11: sleep 1
+build 12: sleep 2
+build 13: sleep 3
+build 14: sleep 4
+build 15: sleep 5
+build 16: sleep 6
+build 17: sleep 7
+build 18: sleep 8
+build 19: sleep 9
+build 20: sleep 10
+build 21: sleep 11
+build 22: sleep 12
+build 23: sleep 13
+build 24: sleep 14
+build 25: sleep 15
+build 26: sleep 16
+build 27: sleep 17
+build 28: sleep 18
+build 29: sleep 19
+build all: phony 20 21 22 23 24 25 26 27 28 29
diff --git a/ninja/misc/measure.py b/ninja/misc/measure.py
new file mode 100755
index 00000000000..1323fc66d12
--- /dev/null
+++ b/ninja/misc/measure.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""measure the runtime of a command by repeatedly running it.
+"""
+
+import time
+import subprocess
+import sys
+
+devnull = open('/dev/null', 'w')
+
+def run(cmd, repeat=10):
+ print 'sampling:',
+ sys.stdout.flush()
+
+ samples = []
+ for _ in range(repeat):
+ start = time.time()
+ subprocess.call(cmd, stdout=devnull, stderr=devnull)
+ end = time.time()
+ dt = (end - start) * 1000
+ print '%dms' % int(dt),
+ sys.stdout.flush()
+ samples.append(dt)
+ print
+
+ # We're interested in the 'pure' runtime of the code, which is
+ # conceptually the smallest time we'd see if we ran it enough times
+ # such that it got the perfect time slices / disk cache hits.
+ best = min(samples)
+ # Also print how varied the outputs were in an attempt to make it
+ # more obvious if something has gone terribly wrong.
+ err = sum(s - best for s in samples) / float(len(samples))
+ print 'estimate: %dms (mean err %.1fms)' % (best, err)
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ print 'usage: measure.py command args...'
+ sys.exit(1)
+ run(cmd=sys.argv[1:])
diff --git a/ninja/misc/ninja-mode.el b/ninja/misc/ninja-mode.el
new file mode 100644
index 00000000000..d939206de1f
--- /dev/null
+++ b/ninja/misc/ninja-mode.el
@@ -0,0 +1,42 @@
+;; Copyright 2011 Google Inc. All Rights Reserved.
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+
+;; Simple emacs mode for editing .ninja files.
+;; Just some syntax highlighting for now.
+
+(setq ninja-keywords
+ (list
+ '("^#.*" . font-lock-comment-face)
+ (cons (concat "^" (regexp-opt '("rule" "build" "subninja" "include"
+ "pool" "default")
+ 'words))
+ font-lock-keyword-face)
+ '("\\([[:alnum:]_]+\\) =" . (1 font-lock-variable-name-face))
+ ;; Variable expansion.
+ '("\\($[[:alnum:]_]+\\)" . (1 font-lock-variable-name-face))
+ ;; Rule names
+ '("rule \\([[:alnum:]_]+\\)" . (1 font-lock-function-name-face))
+ ))
+(define-derived-mode ninja-mode fundamental-mode "ninja"
+ (setq comment-start "#")
+ ; Pass extra "t" to turn off syntax-based fontification -- we don't want
+ ; quoted strings highlighted.
+ (setq font-lock-defaults '(ninja-keywords t))
+ )
+
+(provide 'ninja-mode)
+
+;; Run ninja-mode for files ending in .ninja.
+;;;###autoload
+(add-to-list 'auto-mode-alist '("\\.ninja$" . ninja-mode))
diff --git a/ninja/misc/ninja.vim b/ninja/misc/ninja.vim
new file mode 100644
index 00000000000..d8132678108
--- /dev/null
+++ b/ninja/misc/ninja.vim
@@ -0,0 +1,81 @@
+" ninja build file syntax.
+" Language: ninja build file as described at
+" http://martine.github.com/ninja/manual.html
+" Version: 1.3
+" Last Change: 2013/04/16
+" Maintainer: Nicolas Weber <nicolasweber@gmx.de>
+" Version 1.3 of this script is in the upstream vim repository and will be
+" included in the next vim release. If you change this, please send your change
+" upstream.
+
+" ninja lexer and parser are at
+" https://github.com/martine/ninja/blob/master/src/lexer.in.cc
+" https://github.com/martine/ninja/blob/master/src/manifest_parser.cc
+
+if exists("b:current_syntax")
+ finish
+endif
+
+let s:cpo_save = &cpo
+set cpo&vim
+
+syn case match
+
+syn match ninjaComment /#.*/ contains=@Spell
+
+" Toplevel statements are the ones listed here and
+" toplevel variable assignments (ident '=' value).
+" lexer.in.cc, ReadToken() and manifest_parser.cc, Parse()
+syn match ninjaKeyword "^build\>"
+syn match ninjaKeyword "^rule\>"
+syn match ninjaKeyword "^pool\>"
+syn match ninjaKeyword "^default\>"
+syn match ninjaKeyword "^include\>"
+syn match ninjaKeyword "^subninja\>"
+
+" Both 'build' and 'rule' begin a variable scope that ends
+" on the first line without indent. 'rule' allows only a
+" limited set of magic variables, 'build' allows general
+" let assignments.
+" manifest_parser.cc, ParseRule()
+syn region ninjaRule start="^rule" end="^\ze\S" contains=ALL transparent
+syn keyword ninjaRuleCommand contained command deps depfile description generator
+ \ pool restat rspfile rspfile_content
+
+syn region ninjaPool start="^pool" end="^\ze\S" contains=ALL transparent
+syn keyword ninjaPoolCommand contained depth
+
+" Strings are parsed as follows:
+" lexer.in.cc, ReadEvalString()
+" simple_varname = [a-zA-Z0-9_-]+;
+" varname = [a-zA-Z0-9_.-]+;
+" $$ -> $
+" $\n -> line continuation
+" '$ ' -> escaped space
+" $simple_varname -> variable
+" ${varname} -> variable
+
+syn match ninjaWrapLineOperator "\$$"
+syn match ninjaSimpleVar "\$[a-zA-Z0-9_-]\+"
+syn match ninjaVar "\${[a-zA-Z0-9_.-]\+}"
+
+" operators are:
+" variable assignment =
+" rule definition :
+" implicit dependency |
+" order-only dependency ||
+syn match ninjaOperator "\(=\|:\||\|||\)\ze\s"
+
+hi def link ninjaComment Comment
+hi def link ninjaKeyword Keyword
+hi def link ninjaRuleCommand Statement
+hi def link ninjaPoolCommand Statement
+hi def link ninjaWrapLineOperator ninjaOperator
+hi def link ninjaOperator Operator
+hi def link ninjaSimpleVar ninjaVar
+hi def link ninjaVar Identifier
+
+let b:current_syntax = "ninja"
+
+let &cpo = s:cpo_save
+unlet s:cpo_save
diff --git a/ninja/misc/ninja_syntax.py b/ninja/misc/ninja_syntax.py
new file mode 100644
index 00000000000..d69e3e49e2d
--- /dev/null
+++ b/ninja/misc/ninja_syntax.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+
+"""Python module for generating .ninja files.
+
+Note that this is emphatically not a required piece of Ninja; it's
+just a helpful utility for build-file-generation systems that already
+use Python.
+"""
+
+import textwrap
+import re
+
+def escape_path(word):
+ return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
+
+class Writer(object):
+ def __init__(self, output, width=78):
+ self.output = output
+ self.width = width
+
+ def newline(self):
+ self.output.write('\n')
+
+ def comment(self, text):
+ for line in textwrap.wrap(text, self.width - 2):
+ self.output.write('# ' + line + '\n')
+
+ def variable(self, key, value, indent=0):
+ if value is None:
+ return
+ if isinstance(value, list):
+ value = ' '.join(filter(None, value)) # Filter out empty strings.
+ self._line('%s = %s' % (key, value), indent)
+
+ def pool(self, name, depth):
+ self._line('pool %s' % name)
+ self.variable('depth', depth, indent=1)
+
+ def rule(self, name, command, description=None, depfile=None,
+ generator=False, pool=None, restat=False, rspfile=None,
+ rspfile_content=None, deps=None):
+ self._line('rule %s' % name)
+ self.variable('command', command, indent=1)
+ if description:
+ self.variable('description', description, indent=1)
+ if depfile:
+ self.variable('depfile', depfile, indent=1)
+ if generator:
+ self.variable('generator', '1', indent=1)
+ if pool:
+ self.variable('pool', pool, indent=1)
+ if restat:
+ self.variable('restat', '1', indent=1)
+ if rspfile:
+ self.variable('rspfile', rspfile, indent=1)
+ if rspfile_content:
+ self.variable('rspfile_content', rspfile_content, indent=1)
+ if deps:
+ self.variable('deps', deps, indent=1)
+
+ def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
+ variables=None):
+ outputs = self._as_list(outputs)
+ all_inputs = self._as_list(inputs)[:]
+ out_outputs = list(map(escape_path, outputs))
+ all_inputs = list(map(escape_path, all_inputs))
+
+ if implicit:
+ implicit = map(escape_path, self._as_list(implicit))
+ all_inputs.append('|')
+ all_inputs.extend(implicit)
+ if order_only:
+ order_only = map(escape_path, self._as_list(order_only))
+ all_inputs.append('||')
+ all_inputs.extend(order_only)
+
+ self._line('build %s: %s' % (' '.join(out_outputs),
+ ' '.join([rule] + all_inputs)))
+
+ if variables:
+ if isinstance(variables, dict):
+ iterator = iter(variables.items())
+ else:
+ iterator = iter(variables)
+
+ for key, val in iterator:
+ self.variable(key, val, indent=1)
+
+ return outputs
+
+ def include(self, path):
+ self._line('include %s' % path)
+
+ def subninja(self, path):
+ self._line('subninja %s' % path)
+
+ def default(self, paths):
+ self._line('default %s' % ' '.join(self._as_list(paths)))
+
+ def _count_dollars_before_index(self, s, i):
+ """Returns the number of '$' characters right in front of s[i]."""
+ dollar_count = 0
+ dollar_index = i - 1
+ while dollar_index > 0 and s[dollar_index] == '$':
+ dollar_count += 1
+ dollar_index -= 1
+ return dollar_count
+
+ def _line(self, text, indent=0):
+ """Write 'text' word-wrapped at self.width characters."""
+ leading_space = ' ' * indent
+ while len(leading_space) + len(text) > self.width:
+ # The text is too wide; wrap if possible.
+
+ # Find the rightmost space that would obey our width constraint and
+ # that's not an escaped space.
+ available_space = self.width - len(leading_space) - len(' $')
+ space = available_space
+ while True:
+ space = text.rfind(' ', 0, space)
+ if space < 0 or \
+ self._count_dollars_before_index(text, space) % 2 == 0:
+ break
+
+ if space < 0:
+ # No such space; just use the first unescaped space we can find.
+ space = available_space - 1
+ while True:
+ space = text.find(' ', space + 1)
+ if space < 0 or \
+ self._count_dollars_before_index(text, space) % 2 == 0:
+ break
+ if space < 0:
+ # Give up on breaking.
+ break
+
+ self.output.write(leading_space + text[0:space] + ' $\n')
+ text = text[space+1:]
+
+ # Subsequent lines are continuations, so indent them.
+ leading_space = ' ' * (indent+2)
+
+ self.output.write(leading_space + text + '\n')
+
+ def _as_list(self, input):
+ if input is None:
+ return []
+ if isinstance(input, list):
+ return input
+ return [input]
+
+
+def escape(string):
+ """Escape a string such that it can be embedded into a Ninja file without
+ further interpretation."""
+ assert '\n' not in string, 'Ninja syntax does not allow newlines'
+ # We only have one special metacharacter: '$'.
+ return string.replace('$', '$$')
diff --git a/ninja/misc/ninja_syntax_test.py b/ninja/misc/ninja_syntax_test.py
new file mode 100755
index 00000000000..2aef7ff8307
--- /dev/null
+++ b/ninja/misc/ninja_syntax_test.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+import ninja_syntax
+
+LONGWORD = 'a' * 10
+LONGWORDWITHSPACES = 'a'*5 + '$ ' + 'a'*5
+INDENT = ' '
+
+class TestLineWordWrap(unittest.TestCase):
+ def setUp(self):
+ self.out = StringIO()
+ self.n = ninja_syntax.Writer(self.out, width=8)
+
+ def test_single_long_word(self):
+ # We shouldn't wrap a single long word.
+ self.n._line(LONGWORD)
+ self.assertEqual(LONGWORD + '\n', self.out.getvalue())
+
+ def test_few_long_words(self):
+ # We should wrap a line where the second word is overlong.
+ self.n._line(' '.join(['x', LONGWORD, 'y']))
+ self.assertEqual(' $\n'.join(['x',
+ INDENT + LONGWORD,
+ INDENT + 'y']) + '\n',
+ self.out.getvalue())
+
+ def test_short_words_indented(self):
+        # Test that indent is taken into account when breaking subsequent lines.
+ # The second line should not be ' to tree', as that's longer than the
+ # test layout width of 8.
+ self.n._line('line_one to tree')
+ self.assertEqual('''\
+line_one $
+ to $
+ tree
+''',
+ self.out.getvalue())
+
+ def test_few_long_words_indented(self):
+ # Check wrapping in the presence of indenting.
+ self.n._line(' '.join(['x', LONGWORD, 'y']), indent=1)
+ self.assertEqual(' $\n'.join([' ' + 'x',
+ ' ' + INDENT + LONGWORD,
+ ' ' + INDENT + 'y']) + '\n',
+ self.out.getvalue())
+
+ def test_escaped_spaces(self):
+ self.n._line(' '.join(['x', LONGWORDWITHSPACES, 'y']))
+ self.assertEqual(' $\n'.join(['x',
+ INDENT + LONGWORDWITHSPACES,
+ INDENT + 'y']) + '\n',
+ self.out.getvalue())
+
+ def test_fit_many_words(self):
+ self.n = ninja_syntax.Writer(self.out, width=78)
+ self.n._line('command = cd ../../chrome; python ../tools/grit/grit/format/repack.py ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak ../out/Debug/gen/chrome/theme_resources_large.pak', 1)
+ self.assertEqual('''\
+ command = cd ../../chrome; python ../tools/grit/grit/format/repack.py $
+ ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak $
+ ../out/Debug/gen/chrome/theme_resources_large.pak
+''',
+ self.out.getvalue())
+
+ def test_leading_space(self):
+ self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
+ self.n.variable('foo', ['', '-bar', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = -bar $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_embedded_dollar_dollar(self):
+ self.n = ninja_syntax.Writer(self.out, width=15) # force wrapping
+ self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = a$$b $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_two_embedded_dollar_dollars(self):
+ self.n = ninja_syntax.Writer(self.out, width=17) # force wrapping
+ self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = a$$b $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_leading_dollar_dollar(self):
+ self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
+ self.n.variable('foo', ['$$b', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = $$b $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+ def test_trailing_dollar_dollar(self):
+ self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
+ self.n.variable('foo', ['a$$', '-somethinglong'], 0)
+ self.assertEqual('''\
+foo = a$$ $
+ -somethinglong
+''',
+ self.out.getvalue())
+
+class TestBuild(unittest.TestCase):
+ def setUp(self):
+ self.out = StringIO()
+ self.n = ninja_syntax.Writer(self.out)
+
+ def test_variables_dict(self):
+ self.n.build('out', 'cc', 'in', variables={'name': 'value'})
+ self.assertEqual('''\
+build out: cc in
+ name = value
+''',
+ self.out.getvalue())
+
+ def test_variables_list(self):
+ self.n.build('out', 'cc', 'in', variables=[('name', 'value')])
+ self.assertEqual('''\
+build out: cc in
+ name = value
+''',
+ self.out.getvalue())
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ninja/misc/packaging/ninja.spec b/ninja/misc/packaging/ninja.spec
new file mode 100644
index 00000000000..f0c46feab5a
--- /dev/null
+++ b/ninja/misc/packaging/ninja.spec
@@ -0,0 +1,42 @@
+Summary: Ninja is a small build system with a focus on speed.
+Name: ninja
+Version: %{ver}
+Release: %{rel}%{?dist}
+Group: Development/Tools
+License: Apache 2.0
+URL: https://github.com/martine/ninja
+Source0: %{name}-%{version}-%{rel}.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{rel}
+
+BuildRequires: asciidoc
+
+%description
+Ninja is yet another build system. It takes as input the interdependencies of files (typically source code and output executables) and
+orchestrates building them, quickly.
+
+Ninja joins a sea of other build systems. Its distinguishing goal is to be fast. It is born from my work on the Chromium browser project,
+which has over 30,000 source files and whose other build systems (including one built from custom non-recursive Makefiles) can take ten
+seconds to start building after changing one file. Ninja is under a second.
+
+%prep
+%setup -q -n %{name}-%{version}-%{rel}
+
+%build
+echo Building..
+./bootstrap.py
+./ninja manual
+
+%install
+mkdir -p %{buildroot}%{_bindir} %{buildroot}%{_docdir}
+cp -p ninja %{buildroot}%{_bindir}/
+
+%files
+%defattr(-, root, root)
+%doc COPYING README doc/manual.html
+%{_bindir}/*
+
+%clean
+rm -rf %{buildroot}
+
+#The changelog is built automatically from Git history
+%changelog
diff --git a/ninja/misc/packaging/rpmbuild.sh b/ninja/misc/packaging/rpmbuild.sh
new file mode 100755
index 00000000000..9b74c6588c9
--- /dev/null
+++ b/ninja/misc/packaging/rpmbuild.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+echo Building ninja RPMs..
+GITROOT=$(git rev-parse --show-toplevel)
+cd $GITROOT
+
+VER=1.0
+REL=$(git rev-parse --short HEAD)git
+RPMTOPDIR=$GITROOT/rpm-build
+echo "Ver: $VER, Release: $REL"
+
+# Create tarball
+mkdir -p $RPMTOPDIR/{SOURCES,SPECS}
+git archive --format=tar --prefix=ninja-${VER}-${REL}/ HEAD | gzip -c > $RPMTOPDIR/SOURCES/ninja-${VER}-${REL}.tar.gz
+
+# Convert git log to RPM's ChangeLog format (shown with rpm -qp --changelog <rpm file>)
+sed -e "s/%{ver}/$VER/" -e "s/%{rel}/$REL/" misc/packaging/ninja.spec > $RPMTOPDIR/SPECS/ninja.spec
+git log --format="* %cd %aN%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $RPMTOPDIR/SPECS/ninja.spec
+
+# Build SRC and binary RPMs
+rpmbuild --quiet \
+ --define "_topdir $RPMTOPDIR" \
+ --define "_rpmdir $PWD" \
+ --define "_srcrpmdir $PWD" \
+ --define '_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' \
+ -ba $RPMTOPDIR/SPECS/ninja.spec &&
+
+rm -rf $RPMTOPDIR &&
+echo Done
diff --git a/ninja/misc/zsh-completion b/ninja/misc/zsh-completion
new file mode 100644
index 00000000000..cd0edfbd97a
--- /dev/null
+++ b/ninja/misc/zsh-completion
@@ -0,0 +1,21 @@
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add the following to your .zshrc to tab-complete ninja targets
+# . path/to/ninja/misc/zsh-completion
+
+_ninja() {
+  reply=(`(ninja -t targets all 2>/dev/null) | awk -F: '{print $1}'`)
+}
+compctl -K _ninja ninja
diff --git a/ninja/platform_helper.py b/ninja/platform_helper.py
new file mode 100644
index 00000000000..5097f498baf
--- /dev/null
+++ b/ninja/platform_helper.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# Copyright 2011 Google Inc.
+# Copyright 2013 Patrick von Reth <vonreth@kde.org>
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+def platforms():
+ return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
+ 'mingw', 'msvc', 'gnukfreebsd8']
+
+class Platform(object):
+    def __init__(self, platform):
+        self._platform = platform
+        if self._platform is not None:
+            return
+ self._platform = sys.platform
+ if self._platform.startswith('linux'):
+ self._platform = 'linux'
+ elif self._platform.startswith('freebsd'):
+ self._platform = 'freebsd'
+ elif self._platform.startswith('gnukfreebsd8'):
+ self._platform = 'freebsd'
+ elif self._platform.startswith('openbsd'):
+ self._platform = 'openbsd'
+ elif self._platform.startswith('solaris'):
+ self._platform = 'solaris'
+ elif self._platform.startswith('mingw'):
+ self._platform = 'mingw'
+ elif self._platform.startswith('win'):
+ self._platform = 'msvc'
+
+
+ def platform(self):
+ return self._platform
+
+ def is_linux(self):
+ return self._platform == 'linux'
+
+ def is_mingw(self):
+ return self._platform == 'mingw'
+
+ def is_msvc(self):
+ return self._platform == 'msvc'
+
+ def is_windows(self):
+ return self.is_mingw() or self.is_msvc()
+
+ def is_solaris(self):
+ return self._platform == 'solaris'
+
+ def is_freebsd(self):
+ return self._platform == 'freebsd'
+
+ def is_openbsd(self):
+ return self._platform == 'openbsd'
+
+ def is_sunos5(self):
+ return self._platform == 'sunos5'
diff --git a/ninja/src/browse.cc b/ninja/src/browse.cc
new file mode 100644
index 00000000000..83bfe431d87
--- /dev/null
+++ b/ninja/src/browse.cc
@@ -0,0 +1,65 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "browse.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../build/browse_py.h"
+
+void RunBrowsePython(State* state, const char* ninja_command,
+ const char* initial_target) {
+ // Fork off a Python process and have it run our code via its stdin.
+ // (Actually the Python process becomes the parent.)
+ int pipefd[2];
+ if (pipe(pipefd) < 0) {
+ perror("ninja: pipe");
+ return;
+ }
+
+ pid_t pid = fork();
+ if (pid < 0) {
+ perror("ninja: fork");
+ return;
+ }
+
+ if (pid > 0) { // Parent.
+ close(pipefd[1]);
+ do {
+ if (dup2(pipefd[0], 0) < 0) {
+ perror("ninja: dup2");
+ break;
+ }
+
+ // exec Python, telling it to run the program from stdin.
+ const char* command[] = {
+ NINJA_PYTHON, "-", ninja_command, initial_target, NULL
+ };
+ execvp(command[0], (char**)command);
+ perror("ninja: execvp");
+ } while (false);
+ _exit(1);
+ } else { // Child.
+ close(pipefd[0]);
+
+ // Write the script file into the stdin of the Python process.
+ ssize_t len = write(pipefd[1], kBrowsePy, sizeof(kBrowsePy));
+ if (len < (ssize_t)sizeof(kBrowsePy))
+ perror("ninja: write");
+ close(pipefd[1]);
+ exit(0);
+ }
+}
diff --git a/ninja/src/browse.h b/ninja/src/browse.h
new file mode 100644
index 00000000000..263641fdca9
--- /dev/null
+++ b/ninja/src/browse.h
@@ -0,0 +1,27 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BROWSE_H_
+#define NINJA_BROWSE_H_
+
+struct State;
+
+/// Run in "browse" mode, which execs a Python webserver.
+/// \a ninja_command is the command used to invoke ninja.
+/// \a initial_target is the first target to load.
+/// This function does not return if it runs successfully.
+void RunBrowsePython(State* state, const char* ninja_command,
+ const char* initial_target);
+
+#endif // NINJA_BROWSE_H_
diff --git a/ninja/src/browse.py b/ninja/src/browse.py
new file mode 100755
index 00000000000..9e59bd8effd
--- /dev/null
+++ b/ninja/src/browse.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+#
+# Copyright 2001 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Simple web server for browsing dependency graph data.
+
+This script is inlined into the final executable and spawned by
+it when needed.
+"""
+
+from __future__ import print_function
+
+try:
+ import http.server as httpserver
+except ImportError:
+ import BaseHTTPServer as httpserver
+import os
+import socket
+import subprocess
+import sys
+import webbrowser
+try:
+    from urllib.request import unquote  # Python 3
+except ImportError:
+    from urllib2 import unquote  # Python 2
+from collections import namedtuple
+
+Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs'])
+
+# Ideally we'd allow you to navigate to a build edge or a build node,
+# with appropriate views for each. But there's no way to *name* a build
+# edge so we can only display nodes.
+#
+# For a given node, it has at most one input edge, which has n
+# different inputs. This becomes node.inputs. (We leave out the
+# outputs of the input edge due to what follows.) The node can have
+# multiple dependent output edges. Rather than attempting to display
+# those, they are summarized by taking the union of all their outputs.
+#
+# This means there's no single view that shows you all inputs and outputs
+# of an edge. But I think it's less confusing than alternatives.
+
+def match_strip(line, prefix):
+ if not line.startswith(prefix):
+ return (False, line)
+ return (True, line[len(prefix):])
+
+def parse(text):
+ lines = iter(text.split('\n'))
+
+ target = None
+ rule = None
+ inputs = []
+ outputs = []
+
+ try:
+ target = next(lines)[:-1] # strip trailing colon
+
+ line = next(lines)
+ (match, rule) = match_strip(line, ' input: ')
+ if match:
+ (match, line) = match_strip(next(lines), ' ')
+ while match:
+ type = None
+ (match, line) = match_strip(line, '| ')
+ if match:
+ type = 'implicit'
+ (match, line) = match_strip(line, '|| ')
+ if match:
+ type = 'order-only'
+ inputs.append((line, type))
+ (match, line) = match_strip(next(lines), ' ')
+
+ match, _ = match_strip(line, ' outputs:')
+ if match:
+ (match, line) = match_strip(next(lines), ' ')
+ while match:
+ outputs.append(line)
+ (match, line) = match_strip(next(lines), ' ')
+ except StopIteration:
+ pass
+
+ return Node(inputs, rule, target, outputs)
+
+def create_page(body):
+ return '''<!DOCTYPE html>
+<style>
+body {
+ font-family: sans;
+ font-size: 0.8em;
+ margin: 4ex;
+}
+h1 {
+ font-weight: normal;
+ font-size: 140%;
+ text-align: center;
+ margin: 0;
+}
+h2 {
+ font-weight: normal;
+ font-size: 120%;
+}
+tt {
+ font-family: WebKitHack, monospace;
+ white-space: nowrap;
+}
+.filelist {
+ -webkit-columns: auto 2;
+}
+</style>
+''' + body
+
+def generate_html(node):
+ document = ['<h1><tt>%s</tt></h1>' % node.target]
+
+ if node.inputs:
+ document.append('<h2>target is built using rule <tt>%s</tt> of</h2>' %
+ node.rule)
+ if len(node.inputs) > 0:
+ document.append('<div class=filelist>')
+ for input, type in sorted(node.inputs):
+ extra = ''
+ if type:
+ extra = ' (%s)' % type
+ document.append('<tt><a href="?%s">%s</a>%s</tt><br>' %
+ (input, input, extra))
+ document.append('</div>')
+
+ if node.outputs:
+ document.append('<h2>dependent edges build:</h2>')
+ document.append('<div class=filelist>')
+ for output in sorted(node.outputs):
+ document.append('<tt><a href="?%s">%s</a></tt><br>' %
+ (output, output))
+ document.append('</div>')
+
+ return '\n'.join(document)
+
+def ninja_dump(target):
+ proc = subprocess.Popen([sys.argv[1], '-t', 'query', target],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ universal_newlines=True)
+ return proc.communicate() + (proc.returncode,)
+
+class RequestHandler(httpserver.BaseHTTPRequestHandler):
+ def do_GET(self):
+ assert self.path[0] == '/'
+        target = unquote(self.path[1:])
+
+ if target == '':
+ self.send_response(302)
+ self.send_header('Location', '?' + sys.argv[2])
+ self.end_headers()
+ return
+
+ if not target.startswith('?'):
+ self.send_response(404)
+ self.end_headers()
+ return
+ target = target[1:]
+
+ ninja_output, ninja_error, exit_code = ninja_dump(target)
+ if exit_code == 0:
+ page_body = generate_html(parse(ninja_output.strip()))
+ else:
+ # Relay ninja's error message.
+ page_body = '<h1><tt>%s</tt></h1>' % ninja_error
+
+ self.send_response(200)
+ self.end_headers()
+ self.wfile.write(create_page(page_body).encode('utf-8'))
+
+ def log_message(self, format, *args):
+ pass # Swallow console spam.
+
+port = 8000
+httpd = httpserver.HTTPServer(('',port), RequestHandler)
+try:
+ hostname = socket.gethostname()
+ print('Web server running on %s:%d, ctl-C to abort...' % (hostname,port) )
+ print('Web server pid %d' % os.getpid(), file=sys.stderr )
+ webbrowser.open_new('http://%s:%s' % (hostname, port) )
+ httpd.serve_forever()
+except KeyboardInterrupt:
+ print()
+ pass # Swallow console spam.
+
+
diff --git a/ninja/src/build.cc b/ninja/src/build.cc
new file mode 100644
index 00000000000..5cf9d27dfc6
--- /dev/null
+++ b/ninja/src/build.cc
@@ -0,0 +1,848 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <functional>
+
+#if defined(__SVR4) && defined(__sun)
+#include <sys/termios.h>
+#endif
+
+#include "build_log.h"
+#include "depfile_parser.h"
+#include "deps_log.h"
+#include "disk_interface.h"
+#include "graph.h"
+#include "msvc_helper.h"
+#include "state.h"
+#include "subprocess.h"
+#include "util.h"
+
+namespace {
+
+/// A CommandRunner that doesn't actually run the commands.
+struct DryRunCommandRunner : public CommandRunner {
+ virtual ~DryRunCommandRunner() {}
+
+ // Overridden from CommandRunner:
+ virtual bool CanRunMore();
+ virtual bool StartCommand(Edge* edge);
+ virtual bool WaitForCommand(Result* result);
+
+ private:
+ queue<Edge*> finished_;
+};
+
+bool DryRunCommandRunner::CanRunMore() {
+ return true;
+}
+
+bool DryRunCommandRunner::StartCommand(Edge* edge) {
+ finished_.push(edge);
+ return true;
+}
+
+bool DryRunCommandRunner::WaitForCommand(Result* result) {
+ if (finished_.empty())
+ return false;
+
+ result->status = ExitSuccess;
+ result->edge = finished_.front();
+ finished_.pop();
+ return true;
+}
+
+} // namespace
+
+BuildStatus::BuildStatus(const BuildConfig& config)
+ : config_(config),
+ start_time_millis_(GetTimeMillis()),
+ started_edges_(0), finished_edges_(0), total_edges_(0),
+ progress_status_format_(NULL),
+ overall_rate_(), current_rate_(config.parallelism) {
+
+ // Don't do anything fancy in verbose mode.
+ if (config_.verbosity != BuildConfig::NORMAL)
+ printer_.set_smart_terminal(false);
+
+ progress_status_format_ = getenv("NINJA_STATUS");
+ if (!progress_status_format_)
+ progress_status_format_ = "[%s/%t] ";
+}
+
+void BuildStatus::PlanHasTotalEdges(int total) {
+ total_edges_ = total;
+}
+
+void BuildStatus::BuildEdgeStarted(Edge* edge) {
+ int start_time = (int)(GetTimeMillis() - start_time_millis_);
+ running_edges_.insert(make_pair(edge, start_time));
+ ++started_edges_;
+
+ PrintStatus(edge);
+}
+
+void BuildStatus::BuildEdgeFinished(Edge* edge,
+ bool success,
+ const string& output,
+ int* start_time,
+ int* end_time) {
+ int64_t now = GetTimeMillis();
+ ++finished_edges_;
+
+ RunningEdgeMap::iterator i = running_edges_.find(edge);
+ *start_time = i->second;
+ *end_time = (int)(now - start_time_millis_);
+ running_edges_.erase(i);
+
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+
+ if (printer_.is_smart_terminal())
+ PrintStatus(edge);
+
+ // Print the command that is spewing before printing its output.
+ if (!success)
+ printer_.PrintOnNewLine("FAILED: " + edge->EvaluateCommand() + "\n");
+
+ if (!output.empty()) {
+ // ninja sets stdout and stderr of subprocesses to a pipe, to be able to
+ // check if the output is empty. Some compilers, e.g. clang, check
+ // isatty(stderr) to decide if they should print colored output.
+ // To make it possible to use colored output with ninja, subprocesses should
+ // be run with a flag that forces them to always print color escape codes.
+ // To make sure these escape codes don't show up in a file if ninja's output
+ // is piped to a file, ninja strips ansi escape codes again if it's not
+ // writing to a |smart_terminal_|.
+ // (Launching subprocesses in pseudo ttys doesn't work because there are
+ // only a few hundred available on some systems, and ninja can launch
+ // thousands of parallel compile commands.)
+ // TODO: There should be a flag to disable escape code stripping.
+ string final_output;
+ if (!printer_.is_smart_terminal())
+ final_output = StripAnsiEscapeCodes(output);
+ else
+ final_output = output;
+ printer_.PrintOnNewLine(final_output);
+ }
+}
+
+void BuildStatus::BuildFinished() {
+ printer_.PrintOnNewLine("");
+}
+
+string BuildStatus::FormatProgressStatus(
+ const char* progress_status_format) const {
+ string out;
+ char buf[32];
+ int percent;
+ for (const char* s = progress_status_format; *s != '\0'; ++s) {
+ if (*s == '%') {
+ ++s;
+ switch (*s) {
+ case '%':
+ out.push_back('%');
+ break;
+
+ // Started edges.
+ case 's':
+ snprintf(buf, sizeof(buf), "%d", started_edges_);
+ out += buf;
+ break;
+
+ // Total edges.
+ case 't':
+ snprintf(buf, sizeof(buf), "%d", total_edges_);
+ out += buf;
+ break;
+
+ // Running edges.
+ case 'r':
+ snprintf(buf, sizeof(buf), "%d", started_edges_ - finished_edges_);
+ out += buf;
+ break;
+
+ // Unstarted edges.
+ case 'u':
+ snprintf(buf, sizeof(buf), "%d", total_edges_ - started_edges_);
+ out += buf;
+ break;
+
+ // Finished edges.
+ case 'f':
+ snprintf(buf, sizeof(buf), "%d", finished_edges_);
+ out += buf;
+ break;
+
+ // Overall finished edges per second.
+ case 'o':
+ overall_rate_.UpdateRate(finished_edges_);
+ snprinfRate(overall_rate_.rate(), buf, "%.1f");
+ out += buf;
+ break;
+
+ // Current rate, average over the last '-j' jobs.
+ case 'c':
+ current_rate_.UpdateRate(finished_edges_);
+ snprinfRate(current_rate_.rate(), buf, "%.1f");
+ out += buf;
+ break;
+
+ // Percentage
+ case 'p':
+ percent = (100 * started_edges_) / total_edges_;
+ snprintf(buf, sizeof(buf), "%3i%%", percent);
+ out += buf;
+ break;
+
+ case 'e': {
+ double elapsed = overall_rate_.Elapsed();
+ snprintf(buf, sizeof(buf), "%.3f", elapsed);
+ out += buf;
+ break;
+ }
+
+ default:
+ Fatal("unknown placeholder '%%%c' in $NINJA_STATUS", *s);
+ return "";
+ }
+ } else {
+ out.push_back(*s);
+ }
+ }
+
+ return out;
+}
+
+void BuildStatus::PrintStatus(Edge* edge) {
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+
+ bool force_full_command = config_.verbosity == BuildConfig::VERBOSE;
+
+ string to_print = edge->GetBinding("description");
+ if (to_print.empty() || force_full_command)
+ to_print = edge->GetBinding("command");
+
+ if (finished_edges_ == 0) {
+ overall_rate_.Restart();
+ current_rate_.Restart();
+ }
+ to_print = FormatProgressStatus(progress_status_format_) + to_print;
+
+ printer_.Print(to_print,
+ force_full_command ? LinePrinter::FULL : LinePrinter::ELIDE);
+}
+
+Plan::Plan() : command_edges_(0), wanted_edges_(0) {}
+
+bool Plan::AddTarget(Node* node, string* err) {
+ vector<Node*> stack;
+ return AddSubTarget(node, &stack, err);
+}
+
+bool Plan::AddSubTarget(Node* node, vector<Node*>* stack, string* err) {
+ Edge* edge = node->in_edge();
+ if (!edge) { // Leaf node.
+ if (node->dirty()) {
+ string referenced;
+ if (!stack->empty())
+ referenced = ", needed by '" + stack->back()->path() + "',";
+ *err = "'" + node->path() + "'" + referenced + " missing "
+ "and no known rule to make it";
+ }
+ return false;
+ }
+
+ if (CheckDependencyCycle(node, stack, err))
+ return false;
+
+ if (edge->outputs_ready())
+ return false; // Don't need to do anything.
+
+ // If an entry in want_ does not already exist for edge, create an entry which
+ // maps to false, indicating that we do not want to build this entry itself.
+ pair<map<Edge*, bool>::iterator, bool> want_ins =
+ want_.insert(make_pair(edge, false));
+ bool& want = want_ins.first->second;
+
+ // If we do need to build edge and we haven't already marked it as wanted,
+ // mark it now.
+ if (node->dirty() && !want) {
+ want = true;
+ ++wanted_edges_;
+ if (edge->AllInputsReady())
+ ScheduleWork(edge);
+ if (!edge->is_phony())
+ ++command_edges_;
+ }
+
+ if (!want_ins.second)
+ return true; // We've already processed the inputs.
+
+ stack->push_back(node);
+ for (vector<Node*>::iterator i = edge->inputs_.begin();
+ i != edge->inputs_.end(); ++i) {
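+    // AddSubTarget() returns false both on error and when there is nothing to
+    // do for an input; only treat the result as fatal if |err| was filled in.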
+ if (!AddSubTarget(*i, stack, err) && !err->empty())
+ return false;
+ }
+ assert(stack->back() == node);
+ stack->pop_back();
+
+ return true;
+}
+
+bool Plan::CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err) {
+ vector<Node*>::reverse_iterator ri =
+ find(stack->rbegin(), stack->rend(), node);
+ if (ri == stack->rend())
+ return false;
+
+ // Add this node onto the stack to make it clearer where the loop
+ // is.
+ stack->push_back(node);
+
+ vector<Node*>::iterator start = find(stack->begin(), stack->end(), node);
+ *err = "dependency cycle: ";
+ for (vector<Node*>::iterator i = start; i != stack->end(); ++i) {
+ if (i != start)
+ err->append(" -> ");
+ err->append((*i)->path());
+ }
+ return true;
+}
+
+Edge* Plan::FindWork() {
+ if (ready_.empty())
+ return NULL;
+ set<Edge*>::iterator i = ready_.begin();
+ Edge* edge = *i;
+ ready_.erase(i);
+ return edge;
+}
+
+void Plan::ScheduleWork(Edge* edge) {
+ Pool* pool = edge->pool();
+ if (pool->ShouldDelayEdge()) {
+ // The graph is not completely clean. Some Nodes have duplicate Out edges.
+ // We need to explicitly ignore these here, otherwise their work will get
+ // scheduled twice (see https://github.com/martine/ninja/pull/519)
+ if (ready_.count(edge)) {
+ return;
+ }
+ pool->DelayEdge(edge);
+ pool->RetrieveReadyEdges(&ready_);
+ } else {
+ pool->EdgeScheduled(*edge);
+ ready_.insert(edge);
+ }
+}
+
+void Plan::ResumeDelayedJobs(Edge* edge) {
+ edge->pool()->EdgeFinished(*edge);
+ edge->pool()->RetrieveReadyEdges(&ready_);
+}
+
+void Plan::EdgeFinished(Edge* edge) {
+ map<Edge*, bool>::iterator i = want_.find(edge);
+ assert(i != want_.end());
+ if (i->second)
+ --wanted_edges_;
+ want_.erase(i);
+ edge->outputs_ready_ = true;
+
+ // See if this job frees up any delayed jobs
+ ResumeDelayedJobs(edge);
+
+ // Check off any nodes we were waiting for with this edge.
+ for (vector<Node*>::iterator i = edge->outputs_.begin();
+ i != edge->outputs_.end(); ++i) {
+ NodeFinished(*i);
+ }
+}
+
+void Plan::NodeFinished(Node* node) {
+  // See if we want any edges from this node.
+ for (vector<Edge*>::const_iterator i = node->out_edges().begin();
+ i != node->out_edges().end(); ++i) {
+ map<Edge*, bool>::iterator want_i = want_.find(*i);
+ if (want_i == want_.end())
+ continue;
+
+ // See if the edge is now ready.
+ if ((*i)->AllInputsReady()) {
+ if (want_i->second) {
+ ScheduleWork(*i);
+ } else {
+ // We do not need to build this edge, but we might need to build one of
+ // its dependents.
+ EdgeFinished(*i);
+ }
+ }
+ }
+}
+
+void Plan::CleanNode(DependencyScan* scan, Node* node) {
+ node->set_dirty(false);
+
+ for (vector<Edge*>::const_iterator ei = node->out_edges().begin();
+ ei != node->out_edges().end(); ++ei) {
+ // Don't process edges that we don't actually want.
+ map<Edge*, bool>::iterator want_i = want_.find(*ei);
+ if (want_i == want_.end() || !want_i->second)
+ continue;
+
+ // If all non-order-only inputs for this edge are now clean,
+ // we might have changed the dirty state of the outputs.
+ vector<Node*>::iterator
+ begin = (*ei)->inputs_.begin(),
+ end = (*ei)->inputs_.end() - (*ei)->order_only_deps_;
+ if (find_if(begin, end, mem_fun(&Node::dirty)) == end) {
+ // Recompute most_recent_input and command.
+ Node* most_recent_input = NULL;
+ for (vector<Node*>::iterator ni = begin; ni != end; ++ni) {
+ if (!most_recent_input || (*ni)->mtime() > most_recent_input->mtime())
+ most_recent_input = *ni;
+ }
+ string command = (*ei)->EvaluateCommand(true);
+
+ // Now, recompute the dirty state of each output.
+ bool all_outputs_clean = true;
+ for (vector<Node*>::iterator ni = (*ei)->outputs_.begin();
+ ni != (*ei)->outputs_.end(); ++ni) {
+ if (!(*ni)->dirty())
+ continue;
+
+ if (scan->RecomputeOutputDirty(*ei, most_recent_input, 0,
+ command, *ni)) {
+ (*ni)->MarkDirty();
+ all_outputs_clean = false;
+ } else {
+ CleanNode(scan, *ni);
+ }
+ }
+
+ // If we cleaned all outputs, mark the node as not wanted.
+ if (all_outputs_clean) {
+ want_i->second = false;
+ --wanted_edges_;
+ if (!(*ei)->is_phony())
+ --command_edges_;
+ }
+ }
+ }
+}
+
+void Plan::Dump() {
+ printf("pending: %d\n", (int)want_.size());
+ for (map<Edge*, bool>::iterator i = want_.begin(); i != want_.end(); ++i) {
+ if (i->second)
+ printf("want ");
+ i->first->Dump();
+ }
+ printf("ready: %d\n", (int)ready_.size());
+}
+
+struct RealCommandRunner : public CommandRunner {
+ explicit RealCommandRunner(const BuildConfig& config) : config_(config) {}
+ virtual ~RealCommandRunner() {}
+ virtual bool CanRunMore();
+ virtual bool StartCommand(Edge* edge);
+ virtual bool WaitForCommand(Result* result);
+ virtual vector<Edge*> GetActiveEdges();
+ virtual void Abort();
+
+ const BuildConfig& config_;
+ SubprocessSet subprocs_;
+ map<Subprocess*, Edge*> subproc_to_edge_;
+};
+
+vector<Edge*> RealCommandRunner::GetActiveEdges() {
+ vector<Edge*> edges;
+ for (map<Subprocess*, Edge*>::iterator i = subproc_to_edge_.begin();
+ i != subproc_to_edge_.end(); ++i)
+ edges.push_back(i->second);
+ return edges;
+}
+
+void RealCommandRunner::Abort() {
+ subprocs_.Clear();
+}
+
+bool RealCommandRunner::CanRunMore() {
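+  // Allow another command only while below the parallelism limit and, when a
+  // max load average is configured and something is already running, while the
+  // system load average stays below that cap.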
+ return ((int)subprocs_.running_.size()) < config_.parallelism
+ && ((subprocs_.running_.empty() || config_.max_load_average <= 0.0f)
+ || GetLoadAverage() < config_.max_load_average);
+}
+
+bool RealCommandRunner::StartCommand(Edge* edge) {
+ string command = edge->EvaluateCommand();
+ Subprocess* subproc = subprocs_.Add(command);
+ if (!subproc)
+ return false;
+ subproc_to_edge_.insert(make_pair(subproc, edge));
+
+ return true;
+}
+
+bool RealCommandRunner::WaitForCommand(Result* result) {
+ Subprocess* subproc;
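+  // Service subprocess I/O until some command finishes. DoWork() returns true
+  // if it was interrupted, in which case we report failure to the caller.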
+ while ((subproc = subprocs_.NextFinished()) == NULL) {
+ bool interrupted = subprocs_.DoWork();
+ if (interrupted)
+ return false;
+ }
+
+ result->status = subproc->Finish();
+ result->output = subproc->GetOutput();
+
+ map<Subprocess*, Edge*>::iterator i = subproc_to_edge_.find(subproc);
+ result->edge = i->second;
+ subproc_to_edge_.erase(i);
+
+ delete subproc;
+ return true;
+}
+
+Builder::Builder(State* state, const BuildConfig& config,
+ BuildLog* build_log, DepsLog* deps_log,
+ DiskInterface* disk_interface)
+ : state_(state), config_(config), disk_interface_(disk_interface),
+ scan_(state, build_log, deps_log, disk_interface) {
+ status_ = new BuildStatus(config);
+}
+
+Builder::~Builder() {
+ Cleanup();
+}
+
+void Builder::Cleanup() {
+ if (command_runner_.get()) {
+ vector<Edge*> active_edges = command_runner_->GetActiveEdges();
+ command_runner_->Abort();
+
+ for (vector<Edge*>::iterator i = active_edges.begin();
+ i != active_edges.end(); ++i) {
+ string depfile = (*i)->GetBinding("depfile");
+ for (vector<Node*>::iterator ni = (*i)->outputs_.begin();
+ ni != (*i)->outputs_.end(); ++ni) {
+ // Only delete this output if it was actually modified. This is
+ // important for things like the generator where we don't want to
+ // delete the manifest file if we can avoid it. But if the rule
+ // uses a depfile, always delete. (Consider the case where we
+ // need to rebuild an output because of a modified header file
+ // mentioned in a depfile, and the command touches its depfile
+ // but is interrupted before it touches its output file.)
+ if (!depfile.empty() ||
+ (*ni)->mtime() != disk_interface_->Stat((*ni)->path())) {
+ disk_interface_->RemoveFile((*ni)->path());
+ }
+ }
+ if (!depfile.empty())
+ disk_interface_->RemoveFile(depfile);
+ }
+ }
+}
+
+Node* Builder::AddTarget(const string& name, string* err) {
+ Node* node = state_->LookupNode(name);
+ if (!node) {
+ *err = "unknown target: '" + name + "'";
+ return NULL;
+ }
+ if (!AddTarget(node, err))
+ return NULL;
+ return node;
+}
+
+bool Builder::AddTarget(Node* node, string* err) {
+ node->StatIfNecessary(disk_interface_);
+ if (Edge* in_edge = node->in_edge()) {
+ if (!scan_.RecomputeDirty(in_edge, err))
+ return false;
+ if (in_edge->outputs_ready())
+ return true; // Nothing to do.
+ }
+
+ if (!plan_.AddTarget(node, err))
+ return false;
+
+ return true;
+}
+
+bool Builder::AlreadyUpToDate() const {
+ return !plan_.more_to_do();
+}
+
+bool Builder::Build(string* err) {
+ assert(!AlreadyUpToDate());
+
+ status_->PlanHasTotalEdges(plan_.command_edge_count());
+ int pending_commands = 0;
+ int failures_allowed = config_.failures_allowed;
+
+ // Set up the command runner if we haven't done so already.
+ if (!command_runner_.get()) {
+ if (config_.dry_run)
+ command_runner_.reset(new DryRunCommandRunner);
+ else
+ command_runner_.reset(new RealCommandRunner(config_));
+ }
+
+ // This main loop runs the entire build process.
+ // It is structured like this:
+ // First, we attempt to start as many commands as allowed by the
+ // command runner.
+ // Second, we attempt to wait for / reap the next finished command.
+ while (plan_.more_to_do()) {
+ // See if we can start any more commands.
+ if (failures_allowed && command_runner_->CanRunMore()) {
+ if (Edge* edge = plan_.FindWork()) {
+ if (!StartEdge(edge, err)) {
+ status_->BuildFinished();
+ return false;
+ }
+
+ if (edge->is_phony()) {
+ plan_.EdgeFinished(edge);
+ } else {
+ ++pending_commands;
+ }
+
+ // We made some progress; go back to the main loop.
+ continue;
+ }
+ }
+
+ // See if we can reap any finished commands.
+ if (pending_commands) {
+ CommandRunner::Result result;
+ if (!command_runner_->WaitForCommand(&result) ||
+ result.status == ExitInterrupted) {
+ status_->BuildFinished();
+ *err = "interrupted by user";
+ return false;
+ }
+
+ --pending_commands;
+ FinishCommand(&result);
+
+ if (!result.success()) {
+ if (failures_allowed)
+ failures_allowed--;
+ }
+
+ // We made some progress; start the main loop over.
+ continue;
+ }
+
+ // If we get here, we cannot make any more progress.
+ status_->BuildFinished();
+ if (failures_allowed == 0) {
+ if (config_.failures_allowed > 1)
+ *err = "subcommands failed";
+ else
+ *err = "subcommand failed";
+ } else if (failures_allowed < config_.failures_allowed)
+ *err = "cannot make progress due to previous errors";
+ else
+ *err = "stuck [this is a bug]";
+
+ return false;
+ }
+
+ status_->BuildFinished();
+ return true;
+}
+
+bool Builder::StartEdge(Edge* edge, string* err) {
+ METRIC_RECORD("StartEdge");
+ if (edge->is_phony())
+ return true;
+
+ status_->BuildEdgeStarted(edge);
+
+ // Create directories necessary for outputs.
+ // XXX: this will block; do we care?
+ for (vector<Node*>::iterator i = edge->outputs_.begin();
+ i != edge->outputs_.end(); ++i) {
+ if (!disk_interface_->MakeDirs((*i)->path()))
+ return false;
+ }
+
+ // Create response file, if needed
+ // XXX: this may also block; do we care?
+ string rspfile = edge->GetBinding("rspfile");
+ if (!rspfile.empty()) {
+ string content = edge->GetBinding("rspfile_content");
+ if (!disk_interface_->WriteFile(rspfile, content))
+ return false;
+ }
+
+  // Compute the command and start running it.
+ if (!command_runner_->StartCommand(edge)) {
+ err->assign("command '" + edge->EvaluateCommand() + "' failed.");
+ return false;
+ }
+
+ return true;
+}
+
+void Builder::FinishCommand(CommandRunner::Result* result) {
+ METRIC_RECORD("FinishCommand");
+
+ Edge* edge = result->edge;
+
+ // First try to extract dependencies from the result, if any.
+ // This must happen first as it filters the command output (we want
+ // to filter /showIncludes output, even on compile failure) and
+ // extraction itself can fail, which makes the command fail from a
+ // build perspective.
+ vector<Node*> deps_nodes;
+ string deps_type = edge->GetBinding("deps");
+ if (!deps_type.empty()) {
+ string extract_err;
+ if (!ExtractDeps(result, deps_type, &deps_nodes, &extract_err) &&
+ result->success()) {
+ if (!result->output.empty())
+ result->output.append("\n");
+ result->output.append(extract_err);
+ result->status = ExitFailure;
+ }
+ }
+
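+  // BuildEdgeFinished() reports back the edge's start and end times, measured
+  // from the start of the build, for recording in the build log below.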
+ int start_time, end_time;
+ status_->BuildEdgeFinished(edge, result->success(), result->output,
+ &start_time, &end_time);
+
+ // The rest of this function only applies to successful commands.
+ if (!result->success())
+ return;
+
+ // Restat the edge outputs, if necessary.
+ TimeStamp restat_mtime = 0;
+ if (edge->GetBindingBool("restat") && !config_.dry_run) {
+ bool node_cleaned = false;
+
+ for (vector<Node*>::iterator i = edge->outputs_.begin();
+ i != edge->outputs_.end(); ++i) {
+ TimeStamp new_mtime = disk_interface_->Stat((*i)->path());
+ if ((*i)->mtime() == new_mtime) {
+ // The rule command did not change the output. Propagate the clean
+ // state through the build graph.
+ // Note that this also applies to nonexistent outputs (mtime == 0).
+ plan_.CleanNode(&scan_, *i);
+ node_cleaned = true;
+ }
+ }
+
+ if (node_cleaned) {
+ // If any output was cleaned, find the most recent mtime of any
+ // (existing) non-order-only input or the depfile.
+ for (vector<Node*>::iterator i = edge->inputs_.begin();
+ i != edge->inputs_.end() - edge->order_only_deps_; ++i) {
+ TimeStamp input_mtime = disk_interface_->Stat((*i)->path());
+ if (input_mtime > restat_mtime)
+ restat_mtime = input_mtime;
+ }
+
+ string depfile = edge->GetBinding("depfile");
+ if (restat_mtime != 0 && !depfile.empty()) {
+ TimeStamp depfile_mtime = disk_interface_->Stat(depfile);
+ if (depfile_mtime > restat_mtime)
+ restat_mtime = depfile_mtime;
+ }
+
+ // The total number of edges in the plan may have changed as a result
+ // of a restat.
+ status_->PlanHasTotalEdges(plan_.command_edge_count());
+ }
+ }
+
+ plan_.EdgeFinished(edge);
+
+  // Delete any leftover response file.
+ string rspfile = edge->GetBinding("rspfile");
+ if (!rspfile.empty())
+ disk_interface_->RemoveFile(rspfile);
+
+ if (scan_.build_log()) {
+ scan_.build_log()->RecordCommand(edge, start_time, end_time,
+ restat_mtime);
+ }
+
+ if (!deps_type.empty() && !config_.dry_run) {
+ assert(edge->outputs_.size() == 1 && "should have been rejected by parser");
+ Node* out = edge->outputs_[0];
+ TimeStamp deps_mtime = disk_interface_->Stat(out->path());
+ scan_.deps_log()->RecordDeps(out, deps_mtime, deps_nodes);
+ }
+
+}
+
+bool Builder::ExtractDeps(CommandRunner::Result* result,
+ const string& deps_type,
+ vector<Node*>* deps_nodes,
+ string* err) {
+#ifdef _WIN32
+ if (deps_type == "msvc") {
+ CLParser parser;
+ result->output = parser.Parse(result->output);
+ for (set<string>::iterator i = parser.includes_.begin();
+ i != parser.includes_.end(); ++i) {
+ deps_nodes->push_back(state_->GetNode(*i));
+ }
+ } else
+#endif
+ if (deps_type == "gcc") {
+ string depfile = result->edge->GetBinding("depfile");
+ if (depfile.empty()) {
+ *err = string("edge with deps=gcc but no depfile makes no sense");
+ return false;
+ }
+
+ string content = disk_interface_->ReadFile(depfile, err);
+ if (!err->empty())
+ return false;
+ if (content.empty())
+ return true;
+
+ DepfileParser deps;
+ if (!deps.Parse(&content, err))
+ return false;
+
+ // XXX check depfile matches expected output.
+ deps_nodes->reserve(deps.ins_.size());
+ for (vector<StringPiece>::iterator i = deps.ins_.begin();
+ i != deps.ins_.end(); ++i) {
+ if (!CanonicalizePath(const_cast<char*>(i->str_), &i->len_, err))
+ return false;
+ deps_nodes->push_back(state_->GetNode(*i));
+ }
+
+ if (disk_interface_->RemoveFile(depfile) < 0) {
+ *err = string("deleting depfile: ") + strerror(errno) + string("\n");
+ return false;
+ }
+ } else {
+ Fatal("unknown deps type '%s'", deps_type.c_str());
+ }
+
+ return true;
+}
diff --git a/ninja/src/build.h b/ninja/src/build.h
new file mode 100644
index 00000000000..2715c0cf722
--- /dev/null
+++ b/ninja/src/build.h
@@ -0,0 +1,279 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BUILD_H_
+#define NINJA_BUILD_H_
+
+#include <cstdio>
+#include <map>
+#include <memory>
+#include <queue>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "graph.h" // XXX needed for DependencyScan; should rearrange.
+#include "exit_status.h"
+#include "line_printer.h"
+#include "metrics.h"
+#include "util.h" // int64_t
+
+struct BuildLog;
+struct BuildStatus;
+struct DiskInterface;
+struct Edge;
+struct Node;
+struct State;
+
+/// Plan stores the state of a build plan: what we intend to build,
+/// which steps we're ready to execute.
+struct Plan {
+ Plan();
+
+ /// Add a target to our plan (including all its dependencies).
+ /// Returns false if we don't need to build this target; may
+ /// fill in |err| with an error message if there's a problem.
+ bool AddTarget(Node* node, string* err);
+
+  /// Pop a ready edge off the queue of edges to build.
+  /// Returns NULL if there's no work to do.
+ Edge* FindWork();
+
+ /// Returns true if there's more work to be done.
+ bool more_to_do() const { return wanted_edges_; }
+
+ /// Dumps the current state of the plan.
+ void Dump();
+
+ /// Mark an edge as done building. Used internally and by
+ /// tests.
+ void EdgeFinished(Edge* edge);
+
+ /// Clean the given node during the build.
+ void CleanNode(DependencyScan* scan, Node* node);
+
+ /// Number of edges with commands to run.
+ int command_edge_count() const { return command_edges_; }
+
+private:
+ bool AddSubTarget(Node* node, vector<Node*>* stack, string* err);
+ bool CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err);
+ void NodeFinished(Node* node);
+
+ /// Submits a ready edge as a candidate for execution.
+ /// The edge may be delayed from running, for example if it's a member of a
+ /// currently-full pool.
+ void ScheduleWork(Edge* edge);
+
+ /// Allows jobs blocking on |edge| to potentially resume.
+ /// For example, if |edge| is a member of a pool, calling this may schedule
+ /// previously pending jobs in that pool.
+ void ResumeDelayedJobs(Edge* edge);
+
+  /// Keep track of which edges we want to build in this plan. If this map does
+  /// not contain an entry for an edge, we do not want to build the edge or its
+  /// dependents. If an entry maps to false, we do not want to build the edge
+  /// itself, but we might want to build one of its dependents. If an entry
+  /// maps to true, we want to build the edge.
+ map<Edge*, bool> want_;
+
+ set<Edge*> ready_;
+
+ /// Total number of edges that have commands (not phony).
+ int command_edges_;
+
+ /// Total remaining number of wanted edges.
+ int wanted_edges_;
+};
+
+/// CommandRunner is an interface that wraps running the build
+/// subcommands. This allows tests to abstract out running commands.
+/// RealCommandRunner is an implementation that actually runs commands.
+struct CommandRunner {
+ virtual ~CommandRunner() {}
+ virtual bool CanRunMore() = 0;
+ virtual bool StartCommand(Edge* edge) = 0;
+
+ /// The result of waiting for a command.
+ struct Result {
+ Result() : edge(NULL) {}
+ Edge* edge;
+ ExitStatus status;
+ string output;
+ bool success() const { return status == ExitSuccess; }
+ };
+ /// Wait for a command to complete, or return false if interrupted.
+ virtual bool WaitForCommand(Result* result) = 0;
+
+ virtual vector<Edge*> GetActiveEdges() { return vector<Edge*>(); }
+ virtual void Abort() {}
+};
+
+/// Options (e.g. verbosity, parallelism) passed to a build.
+struct BuildConfig {
+ BuildConfig() : verbosity(NORMAL), dry_run(false), parallelism(1),
+ failures_allowed(1), max_load_average(-0.0f) {}
+
+ enum Verbosity {
+ NORMAL,
+ QUIET, // No output -- used when testing.
+ VERBOSE
+ };
+ Verbosity verbosity;
+ bool dry_run;
+ int parallelism;
+ int failures_allowed;
+ /// The maximum load average we must not exceed. A negative value
+ /// means that we do not have any limit.
+ double max_load_average;
+};
+
+/// Builder wraps the build process: starting commands, updating status.
+struct Builder {
+ Builder(State* state, const BuildConfig& config,
+ BuildLog* build_log, DepsLog* deps_log,
+ DiskInterface* disk_interface);
+ ~Builder();
+
+ /// Clean up after interrupted commands by deleting output files.
+ void Cleanup();
+
+ Node* AddTarget(const string& name, string* err);
+
+ /// Add a target to the build, scanning dependencies.
+ /// @return false on error.
+ bool AddTarget(Node* target, string* err);
+
+ /// Returns true if the build targets are already up to date.
+ bool AlreadyUpToDate() const;
+
+ /// Run the build. Returns false on error.
+ /// It is an error to call this function when AlreadyUpToDate() is true.
+ bool Build(string* err);
+
+ bool StartEdge(Edge* edge, string* err);
+ void FinishCommand(CommandRunner::Result* result);
+
+ /// Used for tests.
+ void SetBuildLog(BuildLog* log) {
+ scan_.set_build_log(log);
+ }
+
+ State* state_;
+ const BuildConfig& config_;
+ Plan plan_;
+ auto_ptr<CommandRunner> command_runner_;
+ BuildStatus* status_;
+
+ private:
+ bool ExtractDeps(CommandRunner::Result* result, const string& deps_type,
+ vector<Node*>* deps_nodes, string* err);
+
+ DiskInterface* disk_interface_;
+ DependencyScan scan_;
+
+ // Unimplemented copy ctor and operator= ensure we don't copy the auto_ptr.
+ Builder(const Builder &other); // DO NOT IMPLEMENT
+ void operator=(const Builder &other); // DO NOT IMPLEMENT
+};
+
+/// Tracks the status of a build: completion fraction, printing updates.
+struct BuildStatus {
+ explicit BuildStatus(const BuildConfig& config);
+ void PlanHasTotalEdges(int total);
+ void BuildEdgeStarted(Edge* edge);
+ void BuildEdgeFinished(Edge* edge, bool success, const string& output,
+ int* start_time, int* end_time);
+ void BuildFinished();
+
+ /// Format the progress status string by replacing the placeholders.
+ /// See the user manual for more information about the available
+ /// placeholders.
+ /// @param progress_status_format The format of the progress status.
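+  /// For example, a format of "[%f/%t] " expands to "[3/10] " once three of
+  /// ten total edges have finished.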
+ string FormatProgressStatus(const char* progress_status_format) const;
+
+ private:
+ void PrintStatus(Edge* edge);
+
+ const BuildConfig& config_;
+
+ /// Time the build started.
+ int64_t start_time_millis_;
+
+ int started_edges_, finished_edges_, total_edges_;
+
+ /// Map of running edge to time the edge started running.
+ typedef map<Edge*, int> RunningEdgeMap;
+ RunningEdgeMap running_edges_;
+
+ /// Prints progress output.
+ LinePrinter printer_;
+
+ /// The custom progress status format to use.
+ const char* progress_status_format_;
+
+ template<size_t S>
+ void snprinfRate(double rate, char(&buf)[S], const char* format) const {
+ if (rate == -1) snprintf(buf, S, "?");
+ else snprintf(buf, S, format, rate);
+ }
+
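+  /// Computes the overall finished-edges-per-second rate (the %o placeholder).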
+ struct RateInfo {
+ RateInfo() : rate_(-1) {}
+
+ void Restart() { stopwatch_.Restart(); }
+ double Elapsed() const { return stopwatch_.Elapsed(); }
+ double rate() { return rate_; }
+
+ void UpdateRate(int edges) {
+ if (edges && stopwatch_.Elapsed())
+ rate_ = edges / stopwatch_.Elapsed();
+ }
+
+ private:
+ double rate_;
+ Stopwatch stopwatch_;
+ };
+
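+  /// Computes a rate over a sliding window of the last N finished edges
+  /// (the %c placeholder; N corresponds to the '-j' job count).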
+ struct SlidingRateInfo {
+ SlidingRateInfo(int n) : rate_(-1), N(n), last_update_(-1) {}
+
+ void Restart() { stopwatch_.Restart(); }
+ double rate() { return rate_; }
+
+ void UpdateRate(int update_hint) {
+ if (update_hint == last_update_)
+ return;
+ last_update_ = update_hint;
+
+ if (times_.size() == N)
+ times_.pop();
+ times_.push(stopwatch_.Elapsed());
+ if (times_.back() != times_.front())
+ rate_ = times_.size() / (times_.back() - times_.front());
+ }
+
+ private:
+ double rate_;
+ Stopwatch stopwatch_;
+ const size_t N;
+ queue<double> times_;
+ int last_update_;
+ };
+
+ mutable RateInfo overall_rate_;
+ mutable SlidingRateInfo current_rate_;
+};
+
+#endif // NINJA_BUILD_H_
diff --git a/ninja/src/build_log.cc b/ninja/src/build_log.cc
new file mode 100644
index 00000000000..6b730028510
--- /dev/null
+++ b/ninja/src/build_log.cc
@@ -0,0 +1,383 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build_log.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef _WIN32
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+#include <unistd.h>
+#endif
+
+#include "build.h"
+#include "graph.h"
+#include "metrics.h"
+#include "util.h"
+
+// Implementation details:
+// Each run's log appends to the log file.
+// To load, we run through all log entries in series, throwing away
+// older runs.
+// Once the number of redundant entries exceeds a threshold, we write
+// out a new file and replace the existing one with it.
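+//
+// A version-5 log entry is a single tab-separated line per output:
+//   <start time>\t<end time>\t<restat mtime>\t<output path>\t<command hash in hex>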
+
+namespace {
+
+const char kFileSignature[] = "# ninja log v%d\n";
+const int kOldestSupportedVersion = 4;
+const int kCurrentVersion = 5;
+
+// 64bit MurmurHash2, by Austin Appleby
+#if defined(_MSC_VER)
+#define BIG_CONSTANT(x) (x)
+#else // defined(_MSC_VER)
+#define BIG_CONSTANT(x) (x##LLU)
+#endif // !defined(_MSC_VER)
+inline
+uint64_t MurmurHash64A(const void* key, size_t len) {
+ static const uint64_t seed = 0xDECAFBADDECAFBADull;
+ const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+ const int r = 47;
+ uint64_t h = seed ^ (len * m);
+ const uint64_t * data = (const uint64_t *)key;
+ const uint64_t * end = data + (len/8);
+ while (data != end) {
+ uint64_t k = *data++;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h ^= k;
+ h *= m;
+ }
+ const unsigned char* data2 = (const unsigned char*)data;
+ switch (len & 7)
+ {
+ case 7: h ^= uint64_t(data2[6]) << 48;
+ case 6: h ^= uint64_t(data2[5]) << 40;
+ case 5: h ^= uint64_t(data2[4]) << 32;
+ case 4: h ^= uint64_t(data2[3]) << 24;
+ case 3: h ^= uint64_t(data2[2]) << 16;
+ case 2: h ^= uint64_t(data2[1]) << 8;
+ case 1: h ^= uint64_t(data2[0]);
+ h *= m;
+ };
+ h ^= h >> r;
+ h *= m;
+ h ^= h >> r;
+ return h;
+}
+#undef BIG_CONSTANT
+
+
+} // namespace
+
+// static
+uint64_t BuildLog::LogEntry::HashCommand(StringPiece command) {
+ return MurmurHash64A(command.str_, command.len_);
+}
+
+BuildLog::LogEntry::LogEntry(const string& output)
+ : output(output) {}
+
+BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
+ int start_time, int end_time, TimeStamp restat_mtime)
+ : output(output), command_hash(command_hash),
+ start_time(start_time), end_time(end_time), restat_mtime(restat_mtime)
+{}
+
+BuildLog::BuildLog()
+ : log_file_(NULL), needs_recompaction_(false) {}
+
+BuildLog::~BuildLog() {
+ Close();
+}
+
+bool BuildLog::OpenForWrite(const string& path, string* err) {
+ if (needs_recompaction_) {
+ Close();
+ if (!Recompact(path, err))
+ return false;
+ }
+
+ log_file_ = fopen(path.c_str(), "ab");
+ if (!log_file_) {
+ *err = strerror(errno);
+ return false;
+ }
+ setvbuf(log_file_, NULL, _IOLBF, BUFSIZ);
+ SetCloseOnExec(fileno(log_file_));
+
+ // Opening a file in append mode doesn't set the file pointer to the file's
+ // end on Windows. Do that explicitly.
+ fseek(log_file_, 0, SEEK_END);
+
+ if (ftell(log_file_) == 0) {
+ if (fprintf(log_file_, kFileSignature, kCurrentVersion) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void BuildLog::RecordCommand(Edge* edge, int start_time, int end_time,
+ TimeStamp restat_mtime) {
+ string command = edge->EvaluateCommand(true);
+ uint64_t command_hash = LogEntry::HashCommand(command);
+ for (vector<Node*>::iterator out = edge->outputs_.begin();
+ out != edge->outputs_.end(); ++out) {
+ const string& path = (*out)->path();
+ Entries::iterator i = entries_.find(path);
+ LogEntry* log_entry;
+ if (i != entries_.end()) {
+ log_entry = i->second;
+ } else {
+ log_entry = new LogEntry(path);
+ entries_.insert(Entries::value_type(log_entry->output, log_entry));
+ }
+ log_entry->command_hash = command_hash;
+ log_entry->start_time = start_time;
+ log_entry->end_time = end_time;
+ log_entry->restat_mtime = restat_mtime;
+
+ if (log_file_)
+ WriteEntry(log_file_, *log_entry);
+ }
+}
+
+void BuildLog::Close() {
+ if (log_file_)
+ fclose(log_file_);
+ log_file_ = NULL;
+}
+
+struct LineReader {
+ explicit LineReader(FILE* file)
+ : file_(file), buf_end_(buf_), line_start_(buf_), line_end_(NULL) {
+ memset(buf_, 0, sizeof(buf_));
+ }
+
+ // Reads a \n-terminated line from the file passed to the constructor.
+  // On return, *line_start points to the beginning of the line that was read,
+  // and *line_end points to the \n terminating it. If no newline is seen
+  // within a fixed buffer size, *line_end is set to NULL. Returns false on EOF.
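+  // Note that the internal buffer is 256 KiB; lines longer than that cannot be
+  // returned intact (see the VeryLongInputLine test in build_log_test.cc).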
+ bool ReadLine(char** line_start, char** line_end) {
+ if (line_start_ >= buf_end_ || !line_end_) {
+ // Buffer empty, refill.
+ size_t size_read = fread(buf_, 1, sizeof(buf_), file_);
+ if (!size_read)
+ return false;
+ line_start_ = buf_;
+ buf_end_ = buf_ + size_read;
+ } else {
+ // Advance to next line in buffer.
+ line_start_ = line_end_ + 1;
+ }
+
+ line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
+ if (!line_end_) {
+ // No newline. Move rest of data to start of buffer, fill rest.
+ size_t already_consumed = line_start_ - buf_;
+ size_t size_rest = (buf_end_ - buf_) - already_consumed;
+ memmove(buf_, line_start_, size_rest);
+
+ size_t read = fread(buf_ + size_rest, 1, sizeof(buf_) - size_rest, file_);
+ buf_end_ = buf_ + size_rest + read;
+ line_start_ = buf_;
+ line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
+ }
+
+ *line_start = line_start_;
+ *line_end = line_end_;
+ return true;
+ }
+
+ private:
+ FILE* file_;
+ char buf_[256 << 10];
+ char* buf_end_; // Points one past the last valid byte in |buf_|.
+
+ char* line_start_;
+ // Points at the next \n in buf_ after line_start, or NULL.
+ char* line_end_;
+};
+
+bool BuildLog::Load(const string& path, string* err) {
+ METRIC_RECORD(".ninja_log load");
+ FILE* file = fopen(path.c_str(), "r");
+ if (!file) {
+ if (errno == ENOENT)
+ return true;
+ *err = strerror(errno);
+ return false;
+ }
+
+ int log_version = 0;
+ int unique_entry_count = 0;
+ int total_entry_count = 0;
+
+ LineReader reader(file);
+ char* line_start = 0;
+ char* line_end = 0;
+ while (reader.ReadLine(&line_start, &line_end)) {
+ if (!log_version) {
+ sscanf(line_start, kFileSignature, &log_version);
+
+ if (log_version < kOldestSupportedVersion) {
+ *err = ("build log version invalid, perhaps due to being too old; "
+ "starting over");
+ fclose(file);
+ unlink(path.c_str());
+ // Don't report this as a failure. An empty build log will cause
+ // us to rebuild the outputs anyway.
+ return true;
+ }
+ }
+
+ // If no newline was found in this chunk, read the next.
+ if (!line_end)
+ continue;
+
+ const char kFieldSeparator = '\t';
+
+ char* start = line_start;
+ char* end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ *end = 0;
+
+ int start_time = 0, end_time = 0;
+ TimeStamp restat_mtime = 0;
+
+ start_time = atoi(start);
+ start = end + 1;
+
+ end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ *end = 0;
+ end_time = atoi(start);
+ start = end + 1;
+
+ end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ *end = 0;
+ restat_mtime = atol(start);
+ start = end + 1;
+
+ end = (char*)memchr(start, kFieldSeparator, line_end - start);
+ if (!end)
+ continue;
+ string output = string(start, end - start);
+
+ start = end + 1;
+ end = line_end;
+
+ LogEntry* entry;
+ Entries::iterator i = entries_.find(output);
+ if (i != entries_.end()) {
+ entry = i->second;
+ } else {
+ entry = new LogEntry(output);
+ entries_.insert(Entries::value_type(entry->output, entry));
+ ++unique_entry_count;
+ }
+ ++total_entry_count;
+
+ entry->start_time = start_time;
+ entry->end_time = end_time;
+ entry->restat_mtime = restat_mtime;
+ if (log_version >= 5) {
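+      // Temporarily NUL-terminate the hash field so strtoull() stops at the
+      // end of the line, then restore the original byte.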
+ char c = *end; *end = '\0';
+ entry->command_hash = (uint64_t)strtoull(start, NULL, 16);
+ *end = c;
+ } else {
+ entry->command_hash = LogEntry::HashCommand(StringPiece(start,
+ end - start));
+ }
+ }
+ fclose(file);
+
+ if (!line_start) {
+ return true; // file was empty
+ }
+
+ // Decide whether it's time to rebuild the log:
+ // - if we're upgrading versions
+ // - if it's getting large
+ int kMinCompactionEntryCount = 100;
+ int kCompactionRatio = 3;
+ if (log_version < kCurrentVersion) {
+ needs_recompaction_ = true;
+ } else if (total_entry_count > kMinCompactionEntryCount &&
+ total_entry_count > unique_entry_count * kCompactionRatio) {
+ needs_recompaction_ = true;
+ }
+
+ return true;
+}
+
+BuildLog::LogEntry* BuildLog::LookupByOutput(const string& path) {
+ Entries::iterator i = entries_.find(path);
+ if (i != entries_.end())
+ return i->second;
+ return NULL;
+}
+
+void BuildLog::WriteEntry(FILE* f, const LogEntry& entry) {
+ fprintf(f, "%d\t%d\t%d\t%s\t%" PRIx64 "\n",
+ entry.start_time, entry.end_time, entry.restat_mtime,
+ entry.output.c_str(), entry.command_hash);
+}
+
+bool BuildLog::Recompact(const string& path, string* err) {
+ METRIC_RECORD(".ninja_log recompact");
+ printf("Recompacting log...\n");
+
+ string temp_path = path + ".recompact";
+ FILE* f = fopen(temp_path.c_str(), "wb");
+ if (!f) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (fprintf(f, kFileSignature, kCurrentVersion) < 0) {
+ *err = strerror(errno);
+ fclose(f);
+ return false;
+ }
+
+ for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
+ WriteEntry(f, *i->second);
+ }
+
+ fclose(f);
+ if (unlink(path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (rename(temp_path.c_str(), path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ return true;
+}
diff --git a/ninja/src/build_log.h b/ninja/src/build_log.h
new file mode 100644
index 00000000000..6eae89f87ee
--- /dev/null
+++ b/ninja/src/build_log.h
@@ -0,0 +1,86 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BUILD_LOG_H_
+#define NINJA_BUILD_LOG_H_
+
+#include <string>
+#include <stdio.h>
+using namespace std;
+
+#include "hash_map.h"
+#include "timestamp.h"
+#include "util.h" // uint64_t
+
+struct Edge;
+
+/// Store a log of every command run for every build.
+/// It has a few uses:
+///
+/// 1) (hashes of) command lines for existing output files, so we know
+/// when we need to rebuild due to the command changing
+/// 2) timing information, perhaps for generating reports
+/// 3) restat information
+struct BuildLog {
+ BuildLog();
+ ~BuildLog();
+
+ bool OpenForWrite(const string& path, string* err);
+ void RecordCommand(Edge* edge, int start_time, int end_time,
+ TimeStamp restat_mtime = 0);
+ void Close();
+
+ /// Load the on-disk log.
+ bool Load(const string& path, string* err);
+
+ struct LogEntry {
+ string output;
+ uint64_t command_hash;
+ int start_time;
+ int end_time;
+ TimeStamp restat_mtime;
+
+ static uint64_t HashCommand(StringPiece command);
+
+ // Used by tests.
+ bool operator==(const LogEntry& o) {
+ return output == o.output && command_hash == o.command_hash &&
+ start_time == o.start_time && end_time == o.end_time &&
+ restat_mtime == o.restat_mtime;
+ }
+
+ explicit LogEntry(const string& output);
+ LogEntry(const string& output, uint64_t command_hash,
+ int start_time, int end_time, TimeStamp restat_mtime);
+ };
+
+ /// Lookup a previously-run command by its output path.
+ LogEntry* LookupByOutput(const string& path);
+
+ /// Serialize an entry into a log file.
+ void WriteEntry(FILE* f, const LogEntry& entry);
+
+ /// Rewrite the known log entries, throwing away old data.
+ bool Recompact(const string& path, string* err);
+
+ typedef ExternalStringHashMap<LogEntry*>::Type Entries;
+ const Entries& entries() const { return entries_; }
+
+ private:
+ Entries entries_;
+ FILE* log_file_;
+ bool needs_recompaction_;
+};
+
+#endif // NINJA_BUILD_LOG_H_
diff --git a/ninja/src/build_log_perftest.cc b/ninja/src/build_log_perftest.cc
new file mode 100644
index 00000000000..a09beb827e8
--- /dev/null
+++ b/ninja/src/build_log_perftest.cc
@@ -0,0 +1,144 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "build_log.h"
+#include "graph.h"
+#include "manifest_parser.h"
+#include "state.h"
+#include "util.h"
+#include "metrics.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+const char kTestFilename[] = "BuildLogPerfTest-tempfile";
+
+bool WriteTestData(string* err) {
+ BuildLog log;
+
+ if (!log.OpenForWrite(kTestFilename, err))
+ return false;
+
+ /*
+ A histogram of command lengths in chromium. For example, 407 builds,
+ 1.4% of all builds, had commands longer than 32 bytes but shorter than 64.
+ 32 407 1.4%
+ 64 183 0.6%
+ 128 1461 5.1%
+ 256 791 2.8%
+ 512 1314 4.6%
+ 1024 6114 21.3%
+ 2048 11759 41.0%
+ 4096 2056 7.2%
+ 8192 4567 15.9%
+ 16384 13 0.0%
+ 32768 4 0.0%
+ 65536 5 0.0%
+ The average command length is 4.1 kB and there were 28674 commands in total,
+ which makes for a total log size of ~120 MB (also counting output filenames).
+
+  Based on this, write 30000 command lines of roughly 4 kB each.
+ */
+
+ // ManifestParser is the only object allowed to create Rules.
+ const size_t kRuleSize = 4000;
+ string long_rule_command = "gcc ";
+ for (int i = 0; long_rule_command.size() < kRuleSize; ++i) {
+ char buf[80];
+ sprintf(buf, "-I../../and/arbitrary/but/fairly/long/path/suffixed/%d ", i);
+ long_rule_command += buf;
+ }
+ long_rule_command += "$in -o $out\n";
+
+ State state;
+ ManifestParser parser(&state, NULL);
+ if (!parser.ParseTest("rule cxx\n command = " + long_rule_command, err))
+ return false;
+
+ // Create build edges. Using ManifestParser is as fast as using the State api
+ // for edge creation, so just use that.
+ const int kNumCommands = 30000;
+ string build_rules;
+ for (int i = 0; i < kNumCommands; ++i) {
+ char buf[80];
+ sprintf(buf, "build input%d.o: cxx input%d.cc\n", i, i);
+ build_rules += buf;
+ }
+
+ if (!parser.ParseTest(build_rules, err))
+ return false;
+
+ for (int i = 0; i < kNumCommands; ++i) {
+ log.RecordCommand(state.edges_[i],
+ /*start_time=*/100 * i,
+ /*end_time=*/100 * i + 1,
+ /*restat_mtime=*/0);
+ }
+
+ return true;
+}
+
+int main() {
+ vector<int> times;
+ string err;
+
+ if (!WriteTestData(&err)) {
+ fprintf(stderr, "Failed to write test data: %s\n", err.c_str());
+ return 1;
+ }
+
+ {
+ // Read once to warm up disk cache.
+ BuildLog log;
+ if (!log.Load(kTestFilename, &err)) {
+ fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
+ return 1;
+ }
+ }
+ const int kNumRepetitions = 5;
+ for (int i = 0; i < kNumRepetitions; ++i) {
+ int64_t start = GetTimeMillis();
+ BuildLog log;
+ if (!log.Load(kTestFilename, &err)) {
+ fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
+ return 1;
+ }
+ int delta = (int)(GetTimeMillis() - start);
+ printf("%dms\n", delta);
+ times.push_back(delta);
+ }
+
+ int min = times[0];
+ int max = times[0];
+ float total = 0;
+ for (size_t i = 0; i < times.size(); ++i) {
+ total += times[i];
+ if (times[i] < min)
+ min = times[i];
+ else if (times[i] > max)
+ max = times[i];
+ }
+
+ printf("min %dms max %dms avg %.1fms\n",
+ min, max, total / times.size());
+
+ unlink(kTestFilename);
+
+ return 0;
+}
+
diff --git a/ninja/src/build_log_test.cc b/ninja/src/build_log_test.cc
new file mode 100644
index 00000000000..4639bc93309
--- /dev/null
+++ b/ninja/src/build_log_test.cc
@@ -0,0 +1,264 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build_log.h"
+
+#include "util.h"
+#include "test.h"
+
+#ifdef _WIN32
+#include <fcntl.h>
+#include <share.h>
+#else
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+namespace {
+
+const char kTestFilename[] = "BuildLogTest-tempfile";
+
+struct BuildLogTest : public StateTestWithBuiltinRules {
+ virtual void SetUp() {
+ // In case a crashing test left a stale file behind.
+ unlink(kTestFilename);
+ }
+ virtual void TearDown() {
+ unlink(kTestFilename);
+ }
+};
+
+TEST_F(BuildLogTest, WriteRead) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n");
+
+ BuildLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ log1.RecordCommand(state_.edges_[0], 15, 18);
+ log1.RecordCommand(state_.edges_[1], 20, 25);
+ log1.Close();
+
+ BuildLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ ASSERT_EQ(2u, log1.entries().size());
+ ASSERT_EQ(2u, log2.entries().size());
+ BuildLog::LogEntry* e1 = log1.LookupByOutput("out");
+ ASSERT_TRUE(e1);
+ BuildLog::LogEntry* e2 = log2.LookupByOutput("out");
+ ASSERT_TRUE(e2);
+ ASSERT_TRUE(*e1 == *e2);
+ ASSERT_EQ(15, e1->start_time);
+ ASSERT_EQ("out", e1->output);
+}
+
+TEST_F(BuildLogTest, FirstWriteAddsSignature) {
+ const char kExpectedVersion[] = "# ninja log vX\n";
+ const size_t kVersionPos = strlen(kExpectedVersion) - 2; // Points at 'X'.
+
+ BuildLog log;
+ string contents, err;
+
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ log.Close();
+
+ ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err));
+ ASSERT_EQ("", err);
+ if (contents.size() >= kVersionPos)
+ contents[kVersionPos] = 'X';
+ EXPECT_EQ(kExpectedVersion, contents);
+
+ // Opening the file anew shouldn't add a second version string.
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ log.Close();
+
+ contents.clear();
+ ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err));
+ ASSERT_EQ("", err);
+ if (contents.size() >= kVersionPos)
+ contents[kVersionPos] = 'X';
+ EXPECT_EQ(kExpectedVersion, contents);
+}
+
+TEST_F(BuildLogTest, DoubleEntry) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "0\t1\t2\tout\tcommand abc\n");
+ fprintf(f, "3\t4\t5\tout\tcommand def\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_TRUE(e);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command def", e->command_hash));
+}
+
+TEST_F(BuildLogTest, Truncate) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n");
+
+ BuildLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ log1.RecordCommand(state_.edges_[0], 15, 18);
+ log1.RecordCommand(state_.edges_[1], 20, 25);
+ log1.Close();
+
+ struct stat statbuf;
+ ASSERT_EQ(0, stat(kTestFilename, &statbuf));
+ ASSERT_GT(statbuf.st_size, 0);
+
+ // For all possible truncations of the input file, assert that we don't
+ // crash when parsing.
+ for (off_t size = statbuf.st_size; size > 0; --size) {
+ BuildLog log2;
+ string err;
+ EXPECT_TRUE(log2.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+ log2.RecordCommand(state_.edges_[0], 15, 18);
+ log2.RecordCommand(state_.edges_[1], 20, 25);
+ log2.Close();
+
+ ASSERT_TRUE(Truncate(kTestFilename, size, &err));
+
+ BuildLog log3;
+ err.clear();
+ ASSERT_TRUE(log3.Load(kTestFilename, &err) || !err.empty());
+ }
+}
+
+TEST_F(BuildLogTest, ObsoleteOldVersion) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v3\n");
+ fprintf(f, "123 456 0 out command\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_NE(err.find("version"), string::npos);
+}
+
+TEST_F(BuildLogTest, SpacesInOutputV4) {
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "123\t456\t456\tout with space\tcommand\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out with space");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(123, e->start_time);
+ ASSERT_EQ(456, e->end_time);
+ ASSERT_EQ(456, e->restat_mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash));
+}
+
+TEST_F(BuildLogTest, DuplicateVersionHeader) {
+ // Old versions of ninja accidentally wrote multiple version headers to the
+ // build log on Windows. This shouldn't crash, and the second version header
+ // should be ignored.
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "123\t456\t456\tout\tcommand\n");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "456\t789\t789\tout2\tcommand2\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(123, e->start_time);
+ ASSERT_EQ(456, e->end_time);
+ ASSERT_EQ(456, e->restat_mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash));
+
+ e = log.LookupByOutput("out2");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(456, e->start_time);
+ ASSERT_EQ(789, e->end_time);
+ ASSERT_EQ(789, e->restat_mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash));
+}
+
+TEST_F(BuildLogTest, VeryLongInputLine) {
+ // Ninja's build log buffer is currently 256kB. Lines longer than that are
+ // silently ignored, but don't affect parsing of other lines.
+ FILE* f = fopen(kTestFilename, "wb");
+ fprintf(f, "# ninja log v4\n");
+ fprintf(f, "123\t456\t456\tout\tcommand start");
+ for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i)
+ fputs(" more_command", f);
+ fprintf(f, "\n");
+ fprintf(f, "456\t789\t789\tout2\tcommand2\n");
+ fclose(f);
+
+ string err;
+ BuildLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ BuildLog::LogEntry* e = log.LookupByOutput("out");
+ ASSERT_EQ(NULL, e);
+
+ e = log.LookupByOutput("out2");
+ ASSERT_TRUE(e);
+ ASSERT_EQ(456, e->start_time);
+ ASSERT_EQ(789, e->end_time);
+ ASSERT_EQ(789, e->restat_mtime);
+ ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash));
+}
+
+TEST_F(BuildLogTest, MultiTargetEdge) {
+ AssertParse(&state_,
+"build out out.d: cat\n");
+
+ BuildLog log;
+ log.RecordCommand(state_.edges_[0], 21, 22);
+
+ ASSERT_EQ(2u, log.entries().size());
+ BuildLog::LogEntry* e1 = log.LookupByOutput("out");
+ ASSERT_TRUE(e1);
+ BuildLog::LogEntry* e2 = log.LookupByOutput("out.d");
+ ASSERT_TRUE(e2);
+ ASSERT_EQ("out", e1->output);
+ ASSERT_EQ("out.d", e2->output);
+ ASSERT_EQ(21, e1->start_time);
+ ASSERT_EQ(21, e2->start_time);
+  ASSERT_EQ(22, e1->end_time);
+ ASSERT_EQ(22, e2->end_time);
+}
+
+} // anonymous namespace
diff --git a/ninja/src/build_test.cc b/ninja/src/build_test.cc
new file mode 100644
index 00000000000..313a386fc43
--- /dev/null
+++ b/ninja/src/build_test.cc
@@ -0,0 +1,1752 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build.h"
+
+#include "build_log.h"
+#include "deps_log.h"
+#include "graph.h"
+#include "test.h"
+
+/// Fixture for tests involving Plan.
+/// Though Plan doesn't use State, it's useful to have one around
+/// to create Nodes and Edges.
+struct PlanTest : public StateTestWithBuiltinRules {
+ Plan plan_;
+
+ /// Because FindWork does not return Edges in any sort of predictable order,
+  /// provide a means to get available Edges in order and in a format which is
+  /// easy to write tests around.
+ void FindWorkSorted(deque<Edge*>* ret, int count) {
+ struct CompareEdgesByOutput {
+ static bool cmp(const Edge* a, const Edge* b) {
+ return a->outputs_[0]->path() < b->outputs_[0]->path();
+ }
+ };
+
+ for (int i = 0; i < count; ++i) {
+ ASSERT_TRUE(plan_.more_to_do());
+ Edge* edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ret->push_back(edge);
+ }
+ ASSERT_FALSE(plan_.FindWork());
+ sort(ret->begin(), ret->end(), CompareEdgesByOutput::cmp);
+ }
+};
+
+TEST_F(PlanTest, Basic) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n");
+ GetNode("mid")->MarkDirty();
+ GetNode("out")->MarkDirty();
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(plan_.more_to_do());
+
+ Edge* edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_EQ("in", edge->inputs_[0]->path());
+ ASSERT_EQ("mid", edge->outputs_[0]->path());
+
+ ASSERT_FALSE(plan_.FindWork());
+
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_EQ("mid", edge->inputs_[0]->path());
+ ASSERT_EQ("out", edge->outputs_[0]->path());
+
+ plan_.EdgeFinished(edge);
+
+ ASSERT_FALSE(plan_.more_to_do());
+ edge = plan_.FindWork();
+ ASSERT_EQ(0, edge);
+}
+
+// Test that two outputs from one rule can be handled as inputs to the next.
+TEST_F(PlanTest, DoubleOutputDirect) {
+ AssertParse(&state_,
+"build out: cat mid1 mid2\n"
+"build mid1 mid2: cat in\n");
+ GetNode("mid1")->MarkDirty();
+ GetNode("mid2")->MarkDirty();
+ GetNode("out")->MarkDirty();
+
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(plan_.more_to_do());
+
+ Edge* edge;
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat in
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat mid1 mid2
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_FALSE(edge); // done
+}
+
+// Test that two outputs from one rule can eventually be routed to another.
+TEST_F(PlanTest, DoubleOutputIndirect) {
+ AssertParse(&state_,
+"build out: cat b1 b2\n"
+"build b1: cat a1\n"
+"build b2: cat a2\n"
+"build a1 a2: cat in\n");
+ GetNode("a1")->MarkDirty();
+ GetNode("a2")->MarkDirty();
+ GetNode("b1")->MarkDirty();
+ GetNode("b2")->MarkDirty();
+ GetNode("out")->MarkDirty();
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(plan_.more_to_do());
+
+ Edge* edge;
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat in
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat a1
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat a2
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat b1 b2
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_FALSE(edge); // done
+}
+
+// Test that two edges from one output can both execute.
+TEST_F(PlanTest, DoubleDependent) {
+ AssertParse(&state_,
+"build out: cat a1 a2\n"
+"build a1: cat mid\n"
+"build a2: cat mid\n"
+"build mid: cat in\n");
+ GetNode("mid")->MarkDirty();
+ GetNode("a1")->MarkDirty();
+ GetNode("a2")->MarkDirty();
+ GetNode("out")->MarkDirty();
+
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(plan_.more_to_do());
+
+ Edge* edge;
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat in
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat mid
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat mid
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge); // cat a1 a2
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_FALSE(edge); // done
+}
+
+TEST_F(PlanTest, DependencyCycle) {
+ AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"
+"build in: cat pre\n"
+"build pre: cat out\n");
+ GetNode("out")->MarkDirty();
+ GetNode("mid")->MarkDirty();
+ GetNode("in")->MarkDirty();
+ GetNode("pre")->MarkDirty();
+
+ string err;
+ EXPECT_FALSE(plan_.AddTarget(GetNode("out"), &err));
+ ASSERT_EQ("dependency cycle: out -> mid -> in -> pre -> out", err);
+}
+
+TEST_F(PlanTest, PoolWithDepthOne) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"pool foobar\n"
+" depth = 1\n"
+"rule poolcat\n"
+" command = cat $in > $out\n"
+" pool = foobar\n"
+"build out1: poolcat in\n"
+"build out2: poolcat in\n"));
+ GetNode("out1")->MarkDirty();
+ GetNode("out2")->MarkDirty();
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("out1"), &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(plan_.more_to_do());
+
+ Edge* edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_EQ("in", edge->inputs_[0]->path());
+ ASSERT_EQ("out1", edge->outputs_[0]->path());
+
+ // This will be false since poolcat is serialized
+ ASSERT_FALSE(plan_.FindWork());
+
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_EQ("in", edge->inputs_[0]->path());
+ ASSERT_EQ("out2", edge->outputs_[0]->path());
+
+ ASSERT_FALSE(plan_.FindWork());
+
+ plan_.EdgeFinished(edge);
+
+ ASSERT_FALSE(plan_.more_to_do());
+ edge = plan_.FindWork();
+ ASSERT_EQ(0, edge);
+}
+
+TEST_F(PlanTest, PoolsWithDepthTwo) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"pool foobar\n"
+" depth = 2\n"
+"pool bazbin\n"
+" depth = 2\n"
+"rule foocat\n"
+" command = cat $in > $out\n"
+" pool = foobar\n"
+"rule bazcat\n"
+" command = cat $in > $out\n"
+" pool = bazbin\n"
+"build out1: foocat in\n"
+"build out2: foocat in\n"
+"build out3: foocat in\n"
+"build outb1: bazcat in\n"
+"build outb2: bazcat in\n"
+"build outb3: bazcat in\n"
+" pool =\n"
+"build allTheThings: cat out1 out2 out3 outb1 outb2 outb3\n"
+));
+ // Mark all the out* nodes dirty
+ for (int i = 0; i < 3; ++i) {
+ GetNode("out" + string(1, '1' + static_cast<char>(i)))->MarkDirty();
+ GetNode("outb" + string(1, '1' + static_cast<char>(i)))->MarkDirty();
+ }
+ GetNode("allTheThings")->MarkDirty();
+
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("allTheThings"), &err));
+ ASSERT_EQ("", err);
+
+ deque<Edge*> edges;
+ FindWorkSorted(&edges, 5);
+
+ for (int i = 0; i < 4; ++i) {
+ Edge *edge = edges[i];
+ ASSERT_EQ("in", edge->inputs_[0]->path());
+ string base_name(i < 2 ? "out" : "outb");
+ ASSERT_EQ(base_name + string(1, '1' + (i % 2)), edge->outputs_[0]->path());
+ }
+
+ // outb3 is exempt because it has an empty pool
+ Edge* edge = edges[4];
+ ASSERT_TRUE(edge);
+ ASSERT_EQ("in", edge->inputs_[0]->path());
+ ASSERT_EQ("outb3", edge->outputs_[0]->path());
+
+ // finish out1
+ plan_.EdgeFinished(edges.front());
+ edges.pop_front();
+
+ // out3 should be available
+ Edge* out3 = plan_.FindWork();
+ ASSERT_TRUE(out3);
+ ASSERT_EQ("in", out3->inputs_[0]->path());
+ ASSERT_EQ("out3", out3->outputs_[0]->path());
+
+ ASSERT_FALSE(plan_.FindWork());
+
+ plan_.EdgeFinished(out3);
+
+ ASSERT_FALSE(plan_.FindWork());
+
+ for (deque<Edge*>::iterator it = edges.begin(); it != edges.end(); ++it) {
+ plan_.EdgeFinished(*it);
+ }
+
+ Edge* last = plan_.FindWork();
+ ASSERT_TRUE(last);
+ ASSERT_EQ("allTheThings", last->outputs_[0]->path());
+
+ plan_.EdgeFinished(last);
+
+ ASSERT_FALSE(plan_.more_to_do());
+ ASSERT_FALSE(plan_.FindWork());
+}
+
+TEST_F(PlanTest, PoolWithRedundantEdges) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "pool compile\n"
+ " depth = 1\n"
+ "rule gen_foo\n"
+ " command = touch foo.cpp\n"
+ "rule gen_bar\n"
+ " command = touch bar.cpp\n"
+ "rule echo\n"
+ " command = echo $out > $out\n"
+ "build foo.cpp.obj: echo foo.cpp || foo.cpp\n"
+ " pool = compile\n"
+ "build bar.cpp.obj: echo bar.cpp || bar.cpp\n"
+ " pool = compile\n"
+ "build libfoo.a: echo foo.cpp.obj bar.cpp.obj\n"
+ "build foo.cpp: gen_foo\n"
+ "build bar.cpp: gen_bar\n"
+ "build all: phony libfoo.a\n"));
+ GetNode("foo.cpp")->MarkDirty();
+ GetNode("foo.cpp.obj")->MarkDirty();
+ GetNode("bar.cpp")->MarkDirty();
+ GetNode("bar.cpp.obj")->MarkDirty();
+ GetNode("libfoo.a")->MarkDirty();
+ GetNode("all")->MarkDirty();
+ string err;
+ EXPECT_TRUE(plan_.AddTarget(GetNode("all"), &err));
+ ASSERT_EQ("", err);
+ ASSERT_TRUE(plan_.more_to_do());
+
+ Edge* edge = NULL;
+
+ deque<Edge*> initial_edges;
+ FindWorkSorted(&initial_edges, 2);
+
+ edge = initial_edges[1]; // Foo first
+ ASSERT_EQ("foo.cpp", edge->outputs_[0]->path());
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_FALSE(plan_.FindWork());
+ ASSERT_EQ("foo.cpp", edge->inputs_[0]->path());
+ ASSERT_EQ("foo.cpp", edge->inputs_[1]->path());
+ ASSERT_EQ("foo.cpp.obj", edge->outputs_[0]->path());
+ plan_.EdgeFinished(edge);
+
+ edge = initial_edges[0]; // Now for bar
+ ASSERT_EQ("bar.cpp", edge->outputs_[0]->path());
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_FALSE(plan_.FindWork());
+ ASSERT_EQ("bar.cpp", edge->inputs_[0]->path());
+ ASSERT_EQ("bar.cpp", edge->inputs_[1]->path());
+ ASSERT_EQ("bar.cpp.obj", edge->outputs_[0]->path());
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_FALSE(plan_.FindWork());
+ ASSERT_EQ("foo.cpp.obj", edge->inputs_[0]->path());
+ ASSERT_EQ("bar.cpp.obj", edge->inputs_[1]->path());
+ ASSERT_EQ("libfoo.a", edge->outputs_[0]->path());
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_TRUE(edge);
+ ASSERT_FALSE(plan_.FindWork());
+ ASSERT_EQ("libfoo.a", edge->inputs_[0]->path());
+ ASSERT_EQ("all", edge->outputs_[0]->path());
+ plan_.EdgeFinished(edge);
+
+ edge = plan_.FindWork();
+ ASSERT_FALSE(edge);
+ ASSERT_FALSE(plan_.more_to_do());
+}
+
+/// Fake implementation of CommandRunner, useful for tests.
+struct FakeCommandRunner : public CommandRunner {
+ explicit FakeCommandRunner(VirtualFileSystem* fs) :
+ last_command_(NULL), fs_(fs) {}
+
+ // CommandRunner impl
+ virtual bool CanRunMore();
+ virtual bool StartCommand(Edge* edge);
+ virtual bool WaitForCommand(Result* result);
+ virtual vector<Edge*> GetActiveEdges();
+ virtual void Abort();
+
+ vector<string> commands_ran_;
+ Edge* last_command_;
+ VirtualFileSystem* fs_;
+};
+
+struct BuildTest : public StateTestWithBuiltinRules {
+ BuildTest() : config_(MakeConfig()), command_runner_(&fs_),
+ builder_(&state_, config_, NULL, NULL, &fs_),
+ status_(config_) {
+ }
+
+ virtual void SetUp() {
+ StateTestWithBuiltinRules::SetUp();
+
+ builder_.command_runner_.reset(&command_runner_);
+ AssertParse(&state_,
+"build cat1: cat in1\n"
+"build cat2: cat in1 in2\n"
+"build cat12: cat cat1 cat2\n");
+
+ fs_.Create("in1", "");
+ fs_.Create("in2", "");
+ }
+
+  ~BuildTest() {
+    // The Builder owns its command runner through a smart pointer; release it
+    // so it does not try to delete command_runner_, which is a member of this
+    // fixture rather than a heap allocation.
+    builder_.command_runner_.release();
+  }
+
+ // Mark a path dirty.
+ void Dirty(const string& path);
+
+ BuildConfig MakeConfig() {
+ BuildConfig config;
+ config.verbosity = BuildConfig::QUIET;
+ return config;
+ }
+
+ BuildConfig config_;
+ FakeCommandRunner command_runner_;
+ VirtualFileSystem fs_;
+ Builder builder_;
+
+ BuildStatus status_;
+};
+
+bool FakeCommandRunner::CanRunMore() {
+ // Only run one at a time.
+ return last_command_ == NULL;
+}
+
+bool FakeCommandRunner::StartCommand(Edge* edge) {
+ assert(!last_command_);
+ commands_ran_.push_back(edge->EvaluateCommand());
+ if (edge->rule().name() == "cat" ||
+ edge->rule().name() == "cat_rsp" ||
+ edge->rule().name() == "cc" ||
+ edge->rule().name() == "touch" ||
+ edge->rule().name() == "touch-interrupt") {
+ for (vector<Node*>::iterator out = edge->outputs_.begin();
+ out != edge->outputs_.end(); ++out) {
+ fs_->Create((*out)->path(), "");
+ }
+ } else if (edge->rule().name() == "true" ||
+ edge->rule().name() == "fail" ||
+ edge->rule().name() == "interrupt") {
+ // Don't do anything.
+ } else {
+ printf("unknown command\n");
+ return false;
+ }
+
+ last_command_ = edge;
+ return true;
+}
+
+bool FakeCommandRunner::WaitForCommand(Result* result) {
+ if (!last_command_)
+ return false;
+
+ Edge* edge = last_command_;
+ result->edge = edge;
+
+ if (edge->rule().name() == "interrupt" ||
+ edge->rule().name() == "touch-interrupt") {
+ result->status = ExitInterrupted;
+ return true;
+ }
+
+ if (edge->rule().name() == "fail")
+ result->status = ExitFailure;
+ else
+ result->status = ExitSuccess;
+ last_command_ = NULL;
+ return true;
+}
+
+vector<Edge*> FakeCommandRunner::GetActiveEdges() {
+ vector<Edge*> edges;
+ if (last_command_)
+ edges.push_back(last_command_);
+ return edges;
+}
+
+void FakeCommandRunner::Abort() {
+ last_command_ = NULL;
+}
+
+void BuildTest::Dirty(const string& path) {
+ Node* node = GetNode(path);
+ node->MarkDirty();
+
+ // If it's an input file, mark that we've already stat()ed it and
+ // it's missing.
+ if (!node->in_edge())
+ node->MarkMissing();
+}
+
+TEST_F(BuildTest, NoWork) {
+ string err;
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+TEST_F(BuildTest, OneStep) {
+ // Given a dirty target with one ready input,
+ // we should rebuild the target.
+ Dirty("cat1");
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("cat1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("cat in1 > cat1", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, OneStep2) {
+ // Given a target with one dirty input,
+ // we should rebuild the target.
+ Dirty("cat1");
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("cat1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("cat in1 > cat1", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, TwoStep) {
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("cat12", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+  // Depending on how the pointers work out, we could have run
+  // the first two commands in either order.
+ EXPECT_TRUE((command_runner_.commands_ran_[0] == "cat in1 > cat1" &&
+ command_runner_.commands_ran_[1] == "cat in1 in2 > cat2") ||
+ (command_runner_.commands_ran_[1] == "cat in1 > cat1" &&
+ command_runner_.commands_ran_[0] == "cat in1 in2 > cat2"));
+
+ EXPECT_EQ("cat cat1 cat2 > cat12", command_runner_.commands_ran_[2]);
+
+ fs_.Tick();
+
+ // Modifying in2 requires rebuilding one intermediate file
+ // and the final file.
+ fs_.Create("in2", "");
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("cat12", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(5u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("cat in1 in2 > cat2", command_runner_.commands_ran_[3]);
+ EXPECT_EQ("cat cat1 cat2 > cat12", command_runner_.commands_ran_[4]);
+}
+
+TEST_F(BuildTest, TwoOutputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"build out1 out2: touch in.txt\n"));
+
+ fs_.Create("in.txt", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ EXPECT_EQ("touch out1 out2", command_runner_.commands_ran_[0]);
+}
+
+// Test case from
+// https://github.com/martine/ninja/issues/148
+TEST_F(BuildTest, MultiOutIn) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule touch\n"
+" command = touch $out\n"
+"build in1 otherfile: touch in\n"
+"build out: touch in | in1\n"));
+
+ fs_.Create("in", "");
+ fs_.Tick();
+ fs_.Create("in1", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+}
+
+TEST_F(BuildTest, Chain) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build c2: cat c1\n"
+"build c3: cat c2\n"
+"build c4: cat c3\n"
+"build c5: cat c4\n"));
+
+ fs_.Create("c1", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("c5", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(4u, command_runner_.commands_ran_.size());
+
+ err.clear();
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("c5", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ fs_.Tick();
+
+ fs_.Create("c3", "");
+ err.clear();
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("c5", &err));
+ ASSERT_EQ("", err);
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // 3->4, 4->5
+}
+
+TEST_F(BuildTest, MissingInput) {
+ // Input is referenced by build file, but no rule for it.
+ string err;
+ Dirty("in1");
+ EXPECT_FALSE(builder_.AddTarget("cat1", &err));
+ EXPECT_EQ("'in1', needed by 'cat1', missing and no known rule to make it",
+ err);
+}
+
+TEST_F(BuildTest, MissingTarget) {
+ // Target is not referenced by build file.
+ string err;
+ EXPECT_FALSE(builder_.AddTarget("meow", &err));
+ EXPECT_EQ("unknown target: 'meow'", err);
+}
+
+TEST_F(BuildTest, MakeDirs) {
+ string err;
+
+#ifdef _WIN32
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "build subdir\\dir2\\file: cat in1\n"));
+ EXPECT_TRUE(builder_.AddTarget("subdir\\dir2\\file", &err));
+#else
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "build subdir/dir2/file: cat in1\n"));
+ EXPECT_TRUE(builder_.AddTarget("subdir/dir2/file", &err));
+#endif
+
+ EXPECT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(2u, fs_.directories_made_.size());
+ EXPECT_EQ("subdir", fs_.directories_made_[0]);
+#ifdef _WIN32
+ EXPECT_EQ("subdir\\dir2", fs_.directories_made_[1]);
+#else
+ EXPECT_EQ("subdir/dir2", fs_.directories_made_[1]);
+#endif
+}
+
+TEST_F(BuildTest, DepFileMissing) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n command = cc $in\n depfile = $out.d\n"
+"build foo.o: cc foo.c\n"));
+ fs_.Create("foo.c", "");
+
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, fs_.files_read_.size());
+ EXPECT_EQ("foo.o.d", fs_.files_read_[0]);
+}
+
+TEST_F(BuildTest, DepFileOK) {
+ string err;
+ int orig_edges = state_.edges_.size();
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n command = cc $in\n depfile = $out.d\n"
+"build foo.o: cc foo.c\n"));
+ Edge* edge = state_.edges_.back();
+
+ fs_.Create("foo.c", "");
+ GetNode("bar.h")->MarkDirty(); // Mark bar.h as missing.
+ fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, fs_.files_read_.size());
+ EXPECT_EQ("foo.o.d", fs_.files_read_[0]);
+
+ // Expect three new edges: one generating foo.o, and two more from
+ // loading the depfile.
+ ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size());
+ // Expect our edge to now have three inputs: foo.c and two headers.
+ ASSERT_EQ(3u, edge->inputs_.size());
+
+ // Expect the command line we generate to only use the original input.
+ ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
+}
+
+TEST_F(BuildTest, DepFileParseError) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n command = cc $in\n depfile = $out.d\n"
+"build foo.o: cc foo.c\n"));
+ fs_.Create("foo.c", "");
+ fs_.Create("foo.o.d", "randomtext\n");
+ EXPECT_FALSE(builder_.AddTarget("foo.o", &err));
+ EXPECT_EQ("expected depfile 'foo.o.d' to mention 'foo.o', got 'randomtext'",
+ err);
+}
+
+TEST_F(BuildTest, OrderOnlyDeps) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n command = cc $in\n depfile = $out.d\n"
+"build foo.o: cc foo.c || otherfile\n"));
+ Edge* edge = state_.edges_.back();
+
+ fs_.Create("foo.c", "");
+ fs_.Create("otherfile", "");
+ fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ ASSERT_EQ("", err);
+
+ // One explicit, two implicit, one order only.
+ ASSERT_EQ(4u, edge->inputs_.size());
+ EXPECT_EQ(2, edge->implicit_deps_);
+ EXPECT_EQ(1, edge->order_only_deps_);
+ // Verify the inputs are in the order we expect
+ // (explicit then implicit then orderonly).
+ EXPECT_EQ("foo.c", edge->inputs_[0]->path());
+ EXPECT_EQ("blah.h", edge->inputs_[1]->path());
+ EXPECT_EQ("bar.h", edge->inputs_[2]->path());
+ EXPECT_EQ("otherfile", edge->inputs_[3]->path());
+
+ // Expect the command line we generate to only use the original input.
+ ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
+
+ // explicit dep dirty, expect a rebuild.
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ fs_.Tick();
+
+ // Recreate the depfile, as it should have been deleted by the build.
+ fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+
+ // implicit dep dirty, expect a rebuild.
+ fs_.Create("blah.h", "");
+ fs_.Create("bar.h", "");
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ fs_.Tick();
+
+ // Recreate the depfile, as it should have been deleted by the build.
+ fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+
+ // order only dep dirty, no rebuild.
+ fs_.Create("otherfile", "");
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ // implicit dep missing, expect rebuild.
+ fs_.RemoveFile("bar.h");
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, RebuildOrderOnlyDeps) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n command = cc $in\n"
+"rule true\n command = true\n"
+"build oo.h: cc oo.h.in\n"
+"build foo.o: cc foo.c || oo.h\n"));
+
+ fs_.Create("foo.c", "");
+ fs_.Create("oo.h.in", "");
+
+ // foo.o and order-only dep dirty, build both.
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+ // all clean, no rebuild.
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ // order-only dep missing, build it only.
+ fs_.RemoveFile("oo.h");
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]);
+
+ fs_.Tick();
+
+ // order-only dep dirty, build it only.
+ fs_.Create("oo.h.in", "");
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("foo.o", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]);
+}
+
+TEST_F(BuildTest, Phony) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat bar.cc\n"
+"build all: phony out\n"));
+ fs_.Create("bar.cc", "");
+
+ EXPECT_TRUE(builder_.AddTarget("all", &err));
+ ASSERT_EQ("", err);
+
+ // Only one command to run, because phony runs no command.
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, PhonyNoWork) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat bar.cc\n"
+"build all: phony out\n"));
+ fs_.Create("bar.cc", "");
+ fs_.Create("out", "");
+
+ EXPECT_TRUE(builder_.AddTarget("all", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+TEST_F(BuildTest, Fail) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule fail\n"
+" command = fail\n"
+"build out1: fail\n"));
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("subcommand failed", err);
+}
+
+TEST_F(BuildTest, SwallowFailures) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule fail\n"
+" command = fail\n"
+"build out1: fail\n"
+"build out2: fail\n"
+"build out3: fail\n"
+"build all: phony out1 out2 out3\n"));
+
+ // Swallow two failures, die on the third.
+ config_.failures_allowed = 3;
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("all", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("subcommands failed", err);
+}
+
+TEST_F(BuildTest, SwallowFailuresLimit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule fail\n"
+" command = fail\n"
+"build out1: fail\n"
+"build out2: fail\n"
+"build out3: fail\n"
+"build final: cat out1 out2 out3\n"));
+
+ // Swallow ten failures; we should stop before building final.
+ config_.failures_allowed = 11;
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("final", &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+ ASSERT_EQ("cannot make progress due to previous errors", err);
+}
+
+struct BuildWithLogTest : public BuildTest {
+ BuildWithLogTest() {
+ builder_.SetBuildLog(&build_log_);
+ }
+
+ BuildLog build_log_;
+};
+
+TEST_F(BuildWithLogTest, NotInLogButOnDisk) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc\n"
+"build out1: cc in\n"));
+
+ // Create input/output that would be considered up to date when
+ // not considering the command line hash.
+ fs_.Create("in", "");
+ fs_.Create("out1", "");
+ string err;
+
+ // Because it's not in the log, it should not be up-to-date until
+ // we build again.
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+}
+
+TEST_F(BuildWithLogTest, RestatTest) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n"
+" restat = 1\n"
+"rule cc\n"
+" command = cc\n"
+" restat = 1\n"
+"build out1: cc in\n"
+"build out2: true out1\n"
+"build out3: cat out2\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Create("out3", "");
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+  // Do a pre-build so that there are commands in the log for the outputs;
+  // otherwise, the lack of an entry in the build log would cause out3 to
+  // rebuild regardless of restat.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+ // "cc" touches out1, so we should build out2. But because "true" does not
+ // touch out2, we should cancel the build of out3.
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+ // If we run again, it should be a no-op, because the build log has recorded
+ // that we've already built out2 with an input timestamp of 2 (from out1).
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ // The build log entry should not, however, prevent us from rebuilding out2
+ // if out1 changes.
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildWithLogTest, RestatMissingFile) {
+ // If a restat rule doesn't create its output, and the output didn't
+ // exist before the rule was run, consider that behavior equivalent
+  // to a rule that doesn't modify its existing output file.
+
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n"
+" restat = 1\n"
+"rule cc\n"
+" command = cc\n"
+"build out1: true in\n"
+"build out2: cc out1\n"));
+
+ fs_.Create("in", "");
+ fs_.Create("out2", "");
+
+  // Do a pre-build so that there are commands in the log for the outputs;
+  // otherwise, the lack of an entry in the build log would cause out2 to
+  // rebuild regardless of restat.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ("", err);
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+
+ fs_.Tick();
+ fs_.Create("in", "");
+ fs_.Create("out2", "");
+
+ // Run a build, expect only the first command to run.
+ // It doesn't touch its output (due to being the "true" command), so
+ // we shouldn't run the dependent build.
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+// Test scenario in which an input file is removed but the output isn't changed
+// https://github.com/martine/ninja/issues/295
+TEST_F(BuildWithLogTest, RestatMissingInput) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule true\n"
+ " command = true\n"
+ " depfile = $out.d\n"
+ " restat = 1\n"
+ "rule cc\n"
+ " command = cc\n"
+ "build out1: true in\n"
+ "build out2: cc out1\n"));
+
+ // Create all necessary files
+ fs_.Create("in", "");
+
+ // The implicit dependencies and the depfile itself
+ // are newer than the output
+ TimeStamp restat_mtime = fs_.Tick();
+ fs_.Create("out1.d", "out1: will.be.deleted restat.file\n");
+ fs_.Create("will.be.deleted", "");
+ fs_.Create("restat.file", "");
+
+ // Run the build, out1 and out2 get built
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size());
+
+ // See that an entry in the logfile is created, capturing
+ // the right mtime
+ BuildLog::LogEntry * log_entry = build_log_.LookupByOutput("out1");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(restat_mtime, log_entry->restat_mtime);
+
+  // Now remove a file referenced from the depfile, so that the target becomes
+  // dirty but the output does not change
+ fs_.RemoveFile("will.be.deleted");
+
+ // Trigger the build again - only out1 gets built
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // Check that the logfile entry remains correctly set
+ log_entry = build_log_.LookupByOutput("out1");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_EQ(restat_mtime, log_entry->restat_mtime);
+}
+
+struct BuildDryRun : public BuildWithLogTest {
+ BuildDryRun() {
+ config_.dry_run = true;
+ }
+};
+
+TEST_F(BuildDryRun, AllCommandsShown) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n"
+" restat = 1\n"
+"rule cc\n"
+" command = cc\n"
+" restat = 1\n"
+"build out1: cc in\n"
+"build out2: true out1\n"
+"build out3: cat out2\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Create("out3", "");
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ // "cc" touches out1, so we should build out2. But because "true" does not
+ // touch out2, we should cancel the build of out3.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out3", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(3u, command_runner_.commands_ran_.size());
+}
+
+// Test that RSP files are created when & where appropriate and deleted after
+// successful execution.
+TEST_F(BuildTest, RspFileSuccess)
+{
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule cat_rsp\n"
+ " command = cat $rspfile > $out\n"
+ " rspfile = $rspfile\n"
+ " rspfile_content = $long_command\n"
+ "build out1: cat in\n"
+ "build out2: cat_rsp in\n"
+ " rspfile = out2.rsp\n"
+ " long_command = Some very long command\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Create("out3", "");
+
+ fs_.Tick();
+
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+
+ size_t files_created = fs_.files_created_.size();
+ size_t files_removed = fs_.files_removed_.size();
+
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // cat + cat_rsp
+
+ // The RSP file was created
+ ASSERT_EQ(files_created + 1, fs_.files_created_.size());
+ ASSERT_EQ(1u, fs_.files_created_.count("out2.rsp"));
+
+ // The RSP file was removed
+ ASSERT_EQ(files_removed + 1, fs_.files_removed_.size());
+ ASSERT_EQ(1u, fs_.files_removed_.count("out2.rsp"));
+}
+
+// Test that the RSP file is created but not removed for commands that fail
+TEST_F(BuildTest, RspFileFailure) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule fail\n"
+ " command = fail\n"
+ " rspfile = $rspfile\n"
+ " rspfile_content = $long_command\n"
+ "build out: fail in\n"
+ " rspfile = out.rsp\n"
+ " long_command = Another very long command\n"));
+
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+
+ size_t files_created = fs_.files_created_.size();
+ size_t files_removed = fs_.files_removed_.size();
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ("subcommand failed", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // The RSP file was created
+ ASSERT_EQ(files_created + 1, fs_.files_created_.size());
+ ASSERT_EQ(1u, fs_.files_created_.count("out.rsp"));
+
+ // The RSP file was NOT removed
+ ASSERT_EQ(files_removed, fs_.files_removed_.size());
+ ASSERT_EQ(0u, fs_.files_removed_.count("out.rsp"));
+
+ // The RSP file contains what it should
+ ASSERT_EQ("Another very long command", fs_.files_["out.rsp"].contents);
+}
+
+// Test that the contents of the RSP file behave like a regular part of the
+// command line, i.e. trigger a rebuild if changed
+TEST_F(BuildWithLogTest, RspFileCmdLineChange) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "rule cat_rsp\n"
+ " command = cat $rspfile > $out\n"
+ " rspfile = $rspfile\n"
+ " rspfile_content = $long_command\n"
+ "build out: cat_rsp in\n"
+ " rspfile = out.rsp\n"
+ " long_command = Original very long command\n"));
+
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+
+ // 1. Build for the 1st time (-> populate log)
+ EXPECT_TRUE(builder_.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ // 2. Build again (no change)
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ EXPECT_EQ("", err);
+ ASSERT_TRUE(builder_.AlreadyUpToDate());
+
+ // 3. Alter the entry in the logfile
+ // (to simulate a change in the command line between 2 builds)
+ BuildLog::LogEntry * log_entry = build_log_.LookupByOutput("out");
+ ASSERT_TRUE(NULL != log_entry);
+ ASSERT_NO_FATAL_FAILURE(AssertHash(
+ "cat out.rsp > out;rspfile=Original very long command",
+ log_entry->command_hash));
+ log_entry->command_hash++; // Change the command hash to something else.
+ // Now expect the target to be rebuilt
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ EXPECT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, InterruptCleanup) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule interrupt\n"
+" command = interrupt\n"
+"rule touch-interrupt\n"
+" command = touch-interrupt\n"
+"build out1: interrupt in1\n"
+"build out2: touch-interrupt in2\n"));
+
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+ fs_.Tick();
+ fs_.Create("in1", "");
+ fs_.Create("in2", "");
+
+ // An untouched output of an interrupted command should be retained.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ EXPECT_EQ("", err);
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("interrupted by user", err);
+ builder_.Cleanup();
+ EXPECT_GT(fs_.Stat("out1"), 0);
+ err = "";
+
+ // A touched output of an interrupted command should be deleted.
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ EXPECT_EQ("", err);
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("interrupted by user", err);
+ builder_.Cleanup();
+ EXPECT_EQ(0, fs_.Stat("out2"));
+}
+
+TEST_F(BuildTest, PhonyWithNoInputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build nonexistent: phony\n"
+"build out1: cat || nonexistent\n"
+"build out2: cat nonexistent\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+
+ // out1 should be up to date even though its input is dirty, because its
+ // order-only dependency has nothing to do.
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out1", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+ // out2 should still be out of date though, because its input is dirty.
+ err.clear();
+ command_runner_.commands_ran_.clear();
+ state_.Reset();
+ EXPECT_TRUE(builder_.AddTarget("out2", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, DepsGccWithEmptyDepfileErrorsOut) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc\n"
+" deps = gcc\n"
+"build out: cc\n"));
+ Dirty("out");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_FALSE(builder_.AlreadyUpToDate());
+
+ EXPECT_FALSE(builder_.Build(&err));
+ ASSERT_EQ("subcommand failed", err);
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
+TEST_F(BuildTest, StatusFormatReplacePlaceholder) {
+ EXPECT_EQ("[%/s0/t0/r0/u0/f0]",
+ status_.FormatProgressStatus("[%%/s%s/t%t/r%r/u%u/f%f]"));
+}
+
+TEST_F(BuildTest, FailedDepsParse) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build bad_deps.o: cat in1\n"
+" deps = gcc\n"
+" depfile = in1.d\n"));
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("bad_deps.o", &err));
+ ASSERT_EQ("", err);
+
+ // These deps will fail to parse, as they should only have one
+ // path to the left of the colon.
+ fs_.Create("in1.d", "AAA BBB");
+
+ EXPECT_FALSE(builder_.Build(&err));
+ EXPECT_EQ("subcommand failed", err);
+}
+
+/// Tests of builds involving deps logs necessarily span multiple builds.
+/// We reuse methods on BuildTest but not the builder_ it sets up,
+/// because we want pristine objects for each build.
+struct BuildWithDepsLogTest : public BuildTest {
+ BuildWithDepsLogTest() {}
+
+ virtual void SetUp() {
+ BuildTest::SetUp();
+
+ temp_dir_.CreateAndEnter("BuildWithDepsLogTest");
+ }
+
+ virtual void TearDown() {
+ temp_dir_.Cleanup();
+ }
+
+ ScopedTempDir temp_dir_;
+
+ /// Shadow parent class builder_ so we don't accidentally use it.
+ void* builder_;
+};
+
+/// Run a straightforward build where the deps log is used.
+TEST_F(BuildWithDepsLogTest, Straightforward) {
+ string err;
+ // Note: in1 was created by the superclass SetUp().
+ const char* manifest =
+ "build out: cat in1\n"
+ " deps = gcc\n"
+ " depfile = in1.d\n";
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ // Run the build once, everything should be ok.
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+ ASSERT_EQ("", err);
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in1.d", "out: in2");
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ // The deps file should have been removed.
+ EXPECT_EQ(0, fs_.Stat("in1.d"));
+ // Recreate it for the next step.
+ fs_.Create("in1.d", "out: in2");
+ deps_log.Close();
+ builder.command_runner_.release();
+ }
+
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+    // Touch the file that is mentioned only in the deps.
+ fs_.Tick();
+ fs_.Create("in2", "");
+
+ // Run the build again.
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ // We should have rebuilt the output due to in2 being
+ // out of date.
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+ }
+}
+
+/// Verify that obsolete dependency info causes a rebuild.
+/// 1) Run a successful build where everything has time t, record deps.
+/// 2) Move input/output to time t+1; even though the files line up,
+///    we should still rebuild because the recorded deps carry the older time.
+TEST_F(BuildWithDepsLogTest, ObsoleteDeps) {
+ string err;
+ // Note: in1 was created by the superclass SetUp().
+ const char* manifest =
+ "build out: cat in1\n"
+ " deps = gcc\n"
+ " depfile = in1.d\n";
+ {
+ // Run an ordinary build that gathers dependencies.
+ fs_.Create("in1", "");
+ fs_.Create("in1.d", "out: ");
+
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ // Run the build once, everything should be ok.
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+ ASSERT_EQ("", err);
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ deps_log.Close();
+ builder.command_runner_.release();
+ }
+
+ // Push all files one tick forward so that only the deps are out
+ // of date.
+ fs_.Tick();
+ fs_.Create("in1", "");
+ fs_.Create("out", "");
+
+ // The deps file should have been removed, so no need to timestamp it.
+ EXPECT_EQ(0, fs_.Stat("in1.d"));
+
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+
+    // Recreate the deps file here because the build expects it to exist.
+ fs_.Create("in1.d", "out: ");
+
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ // We should have rebuilt the output due to the deps being
+ // out of date.
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+ }
+}
+
+TEST_F(BuildWithDepsLogTest, DepsIgnoredInDryRun) {
+ const char* manifest =
+ "build out: cat in1\n"
+ " deps = gcc\n"
+ " depfile = in1.d\n";
+
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("in1", "");
+
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ // The deps log is NULL in dry runs.
+ config_.dry_run = true;
+ Builder builder(&state, config_, NULL, NULL, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+
+ string err;
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+}
+
+/// Check that a restat rule generating a header cancels compilations correctly.
+TEST_F(BuildTest, RestatDepfileDependency) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule true\n"
+" command = true\n" // Would be "write if out-of-date" in reality.
+" restat = 1\n"
+"build header.h: true header.in\n"
+"build out: cat in1\n"
+" depfile = in1.d\n"));
+
+ fs_.Create("header.h", "");
+ fs_.Create("in1.d", "out: header.h");
+ fs_.Tick();
+ fs_.Create("header.in", "");
+
+ string err;
+ EXPECT_TRUE(builder_.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder_.Build(&err));
+ EXPECT_EQ("", err);
+}
+
+/// Check that a restat rule generating a header cancels compilations correctly,
+/// depslog case.
+TEST_F(BuildWithDepsLogTest, RestatDepfileDependencyDepsLog) {
+ string err;
+ // Note: in1 was created by the superclass SetUp().
+ const char* manifest =
+ "rule true\n"
+ " command = true\n" // Would be "write if out-of-date" in reality.
+ " restat = 1\n"
+ "build header.h: true header.in\n"
+ "build out: cat in1\n"
+ " deps = gcc\n"
+ " depfile = in1.d\n";
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ // Run the build once, everything should be ok.
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+ ASSERT_EQ("", err);
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("in1.d", "out: header.h");
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ deps_log.Close();
+ builder.command_runner_.release();
+ }
+
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ // Touch the input of the restat rule.
+ fs_.Tick();
+ fs_.Create("header.in", "");
+
+ // Run the build again.
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ command_runner_.commands_ran_.clear();
+ EXPECT_TRUE(builder.AddTarget("out", &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ // Rule "true" should have run again, but the build of "out" should have
+ // been cancelled due to restat propagating through the depfile header.
+ EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+
+ builder.command_runner_.release();
+ }
+}
+
+TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) {
+ string err;
+ const char* manifest =
+ "rule cc\n command = cc $in\n depfile = $out.d\n deps = gcc\n"
+ "build foo.o: cc foo.c\n";
+
+ fs_.Create("foo.c", "");
+
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ // Run the build once, everything should be ok.
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+ ASSERT_EQ("", err);
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+ EXPECT_TRUE(builder.AddTarget("foo.o", &err));
+ ASSERT_EQ("", err);
+ fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n");
+ EXPECT_TRUE(builder.Build(&err));
+ EXPECT_EQ("", err);
+
+ deps_log.Close();
+ builder.command_runner_.release();
+ }
+
+ {
+ State state;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+ DepsLog deps_log;
+ ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
+ ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+ ASSERT_EQ("", err);
+
+ Builder builder(&state, config_, NULL, &deps_log, &fs_);
+ builder.command_runner_.reset(&command_runner_);
+
+ Edge* edge = state.edges_.back();
+
+ state.GetNode("bar.h")->MarkDirty(); // Mark bar.h as missing.
+ EXPECT_TRUE(builder.AddTarget("foo.o", &err));
+ ASSERT_EQ("", err);
+
+ // Expect three new edges: one generating foo.o, and two more from
+ // loading the depfile.
+ ASSERT_EQ(3, (int)state.edges_.size());
+ // Expect our edge to now have three inputs: foo.c and two headers.
+ ASSERT_EQ(3u, edge->inputs_.size());
+
+ // Expect the command line we generate to only use the original input.
+ ASSERT_EQ("cc foo.c", edge->EvaluateCommand());
+
+ deps_log.Close();
+ builder.command_runner_.release();
+ }
+}
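The tests above drive Builder with FakeCommandRunner in place of the real subprocess runner. As a reading aid only, here is a minimal sketch of the contract such a runner satisfies, mirroring the five virtual methods FakeCommandRunner overrides; NoopCommandRunner is hypothetical and not part of the import, and the CommandRunner, Result, Edge, and ExitSuccess names are assumed to come from the build machinery exercised above.

    // Hypothetical stand-in runner: accepts one edge at a time and reports
    // every command as having exited successfully.
    struct NoopCommandRunner : public CommandRunner {
      NoopCommandRunner() : pending_(NULL) {}
      virtual bool CanRunMore() { return pending_ == NULL; }
      virtual bool StartCommand(Edge* edge) { pending_ = edge; return true; }
      virtual bool WaitForCommand(Result* result) {
        if (!pending_) return false;
        result->edge = pending_;
        result->status = ExitSuccess;
        pending_ = NULL;
        return true;
      }
      virtual vector<Edge*> GetActiveEdges() {
        return pending_ ? vector<Edge*>(1, pending_) : vector<Edge*>();
      }
      virtual void Abort() { pending_ = NULL; }
      Edge* pending_;
    };

A test would wire it up the same way BuildTest::SetUp() does above: reset() it into builder_.command_runner_ before building, and release() it afterwards.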
diff --git a/ninja/src/canon_perftest.cc b/ninja/src/canon_perftest.cc
new file mode 100644
index 00000000000..59bd18f4526
--- /dev/null
+++ b/ninja/src/canon_perftest.cc
@@ -0,0 +1,56 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <string.h>
+
+#include "util.h"
+#include "metrics.h"
+
+const char kPath[] =
+ "../../third_party/WebKit/Source/WebCore/"
+ "platform/leveldb/LevelDBWriteBatch.cpp";
+
+int main() {
+ vector<int> times;
+ string err;
+
+ char buf[200];
+ size_t len = strlen(kPath);
+ strcpy(buf, kPath);
+
+ for (int j = 0; j < 5; ++j) {
+ const int kNumRepetitions = 2000000;
+ int64_t start = GetTimeMillis();
+ for (int i = 0; i < kNumRepetitions; ++i) {
+ CanonicalizePath(buf, &len, &err);
+ }
+ int delta = (int)(GetTimeMillis() - start);
+ times.push_back(delta);
+ }
+
+ int min = times[0];
+ int max = times[0];
+ float total = 0;
+ for (size_t i = 0; i < times.size(); ++i) {
+ total += times[i];
+ if (times[i] < min)
+ min = times[i];
+ else if (times[i] > max)
+ max = times[i];
+ }
+
+ printf("min %dms max %dms avg %.1fms\n",
+ min, max, total / times.size());
+}
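The benchmark above measures throughput only; it never shows the effect of the call it times. A minimal illustration, not part of the import and resting on the assumption that this overload of CanonicalizePath rewrites the buffer in place (collapsing "./" and "dir/../" segments) and returns a success flag:

    char path[] = "foo/./bar/../baz.cpp";
    size_t len = strlen(path);
    string err;
    if (CanonicalizePath(path, &len, &err))
      printf("%.*s\n", (int)len, path);  // expected: foo/baz.cpp

Under that assumption, the kPath used by the benchmark has no collapsible segments, so repeated calls leave the buffer unchanged between iterations.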
diff --git a/ninja/src/clean.cc b/ninja/src/clean.cc
new file mode 100644
index 00000000000..5d1974e1582
--- /dev/null
+++ b/ninja/src/clean.cc
@@ -0,0 +1,259 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "clean.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "disk_interface.h"
+#include "graph.h"
+#include "state.h"
+#include "util.h"
+
+Cleaner::Cleaner(State* state, const BuildConfig& config)
+ : state_(state),
+ config_(config),
+ removed_(),
+ cleaned_(),
+ cleaned_files_count_(0),
+ disk_interface_(new RealDiskInterface),
+ status_(0) {
+}
+
+Cleaner::Cleaner(State* state,
+ const BuildConfig& config,
+ DiskInterface* disk_interface)
+ : state_(state),
+ config_(config),
+ removed_(),
+ cleaned_(),
+ cleaned_files_count_(0),
+ disk_interface_(disk_interface),
+ status_(0) {
+}
+
+int Cleaner::RemoveFile(const string& path) {
+ return disk_interface_->RemoveFile(path);
+}
+
+bool Cleaner::FileExists(const string& path) {
+ return disk_interface_->Stat(path) > 0;
+}
+
+void Cleaner::Report(const string& path) {
+ ++cleaned_files_count_;
+ if (IsVerbose())
+ printf("Remove %s\n", path.c_str());
+}
+
+void Cleaner::Remove(const string& path) {
+ if (!IsAlreadyRemoved(path)) {
+ removed_.insert(path);
+ if (config_.dry_run) {
+ if (FileExists(path))
+ Report(path);
+ } else {
+ int ret = RemoveFile(path);
+ if (ret == 0)
+ Report(path);
+ else if (ret == -1)
+ status_ = 1;
+ }
+ }
+}
+
+bool Cleaner::IsAlreadyRemoved(const string& path) {
+ set<string>::iterator i = removed_.find(path);
+ return (i != removed_.end());
+}
+
+void Cleaner::RemoveEdgeFiles(Edge* edge) {
+ string depfile = edge->GetBinding("depfile");
+ if (!depfile.empty())
+ Remove(depfile);
+
+ string rspfile = edge->GetBinding("rspfile");
+ if (!rspfile.empty())
+ Remove(rspfile);
+}
+
+void Cleaner::PrintHeader() {
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+ printf("Cleaning...");
+ if (IsVerbose())
+ printf("\n");
+ else
+ printf(" ");
+}
+
+void Cleaner::PrintFooter() {
+ if (config_.verbosity == BuildConfig::QUIET)
+ return;
+ printf("%d files.\n", cleaned_files_count_);
+}
+
+int Cleaner::CleanAll(bool generator) {
+ Reset();
+ PrintHeader();
+ for (vector<Edge*>::iterator e = state_->edges_.begin();
+ e != state_->edges_.end(); ++e) {
+ // Do not try to remove phony targets
+ if ((*e)->is_phony())
+ continue;
+    // Do not remove a generator's files unless generator cleaning was requested.
+ if (!generator && (*e)->GetBindingBool("generator"))
+ continue;
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ Remove((*out_node)->path());
+ }
+
+ RemoveEdgeFiles(*e);
+ }
+ PrintFooter();
+ return status_;
+}
+
+void Cleaner::DoCleanTarget(Node* target) {
+ if (Edge* e = target->in_edge()) {
+ // Do not try to remove phony targets
+ if (!e->is_phony()) {
+ Remove(target->path());
+ RemoveEdgeFiles(e);
+ }
+ for (vector<Node*>::iterator n = e->inputs_.begin(); n != e->inputs_.end();
+ ++n) {
+ Node* next = *n;
+ // call DoCleanTarget recursively if this node has not been visited
+ if (cleaned_.count(next) == 0) {
+ DoCleanTarget(next);
+ }
+ }
+ }
+
+  // Mark this target as already cleaned.
+ cleaned_.insert(target);
+}
+
+int Cleaner::CleanTarget(Node* target) {
+ assert(target);
+
+ Reset();
+ PrintHeader();
+ DoCleanTarget(target);
+ PrintFooter();
+ return status_;
+}
+
+int Cleaner::CleanTarget(const char* target) {
+ assert(target);
+
+ Reset();
+ Node* node = state_->LookupNode(target);
+ if (node) {
+ CleanTarget(node);
+ } else {
+ Error("unknown target '%s'", target);
+ status_ = 1;
+ }
+ return status_;
+}
+
+int Cleaner::CleanTargets(int target_count, char* targets[]) {
+ Reset();
+ PrintHeader();
+ for (int i = 0; i < target_count; ++i) {
+ const char* target_name = targets[i];
+ Node* target = state_->LookupNode(target_name);
+ if (target) {
+ if (IsVerbose())
+ printf("Target %s\n", target_name);
+ DoCleanTarget(target);
+ } else {
+ Error("unknown target '%s'", target_name);
+ status_ = 1;
+ }
+ }
+ PrintFooter();
+ return status_;
+}
+
+void Cleaner::DoCleanRule(const Rule* rule) {
+ assert(rule);
+
+ for (vector<Edge*>::iterator e = state_->edges_.begin();
+ e != state_->edges_.end(); ++e) {
+ if ((*e)->rule().name() == rule->name()) {
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ Remove((*out_node)->path());
+ RemoveEdgeFiles(*e);
+ }
+ }
+ }
+}
+
+int Cleaner::CleanRule(const Rule* rule) {
+ assert(rule);
+
+ Reset();
+ PrintHeader();
+ DoCleanRule(rule);
+ PrintFooter();
+ return status_;
+}
+
+int Cleaner::CleanRule(const char* rule) {
+ assert(rule);
+
+ Reset();
+ const Rule* r = state_->LookupRule(rule);
+ if (r) {
+ CleanRule(r);
+ } else {
+ Error("unknown rule '%s'", rule);
+ status_ = 1;
+ }
+ return status_;
+}
+
+int Cleaner::CleanRules(int rule_count, char* rules[]) {
+ assert(rules);
+
+ Reset();
+ PrintHeader();
+ for (int i = 0; i < rule_count; ++i) {
+ const char* rule_name = rules[i];
+ const Rule* rule = state_->LookupRule(rule_name);
+ if (rule) {
+ if (IsVerbose())
+ printf("Rule %s\n", rule_name);
+ DoCleanRule(rule);
+ } else {
+ Error("unknown rule '%s'", rule_name);
+ status_ = 1;
+ }
+ }
+ PrintFooter();
+ return status_;
+}
+
+void Cleaner::Reset() {
+ status_ = 0;
+ cleaned_files_count_ = 0;
+ removed_.clear();
+ cleaned_.clear();
+}
diff --git a/ninja/src/clean.h b/ninja/src/clean.h
new file mode 100644
index 00000000000..19432ab2d44
--- /dev/null
+++ b/ninja/src/clean.h
@@ -0,0 +1,107 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_CLEAN_H_
+#define NINJA_CLEAN_H_
+
+#include <set>
+#include <string>
+
+#include "build.h"
+
+using namespace std;
+
+struct State;
+struct Node;
+struct Rule;
+struct DiskInterface;
+
+struct Cleaner {
+ /// Build a cleaner object with a real disk interface.
+ Cleaner(State* state, const BuildConfig& config);
+
+ /// Build a cleaner object with the given @a disk_interface
+ /// (Useful for testing).
+ Cleaner(State* state,
+ const BuildConfig& config,
+ DiskInterface* disk_interface);
+
+  /// Clean the given @a target and all the files built for it.
+ /// @return non-zero if an error occurs.
+ int CleanTarget(Node* target);
+ /// Clean the given target @a target.
+ /// @return non-zero if an error occurs.
+ int CleanTarget(const char* target);
+  /// Clean the given targets @a targets.
+ /// @return non-zero if an error occurs.
+ int CleanTargets(int target_count, char* targets[]);
+
+ /// Clean all built files, except for files created by generator rules.
+ /// @param generator If set, also clean files created by generator rules.
+ /// @return non-zero if an error occurs.
+ int CleanAll(bool generator = false);
+
+  /// Clean all the files built with the given rule @a rule.
+ /// @return non-zero if an error occurs.
+ int CleanRule(const Rule* rule);
+  /// Clean the files produced by the given @a rule.
+ /// @return non-zero if an error occurs.
+ int CleanRule(const char* rule);
+  /// Clean the files produced by the given @a rules.
+ /// @return non-zero if an error occurs.
+ int CleanRules(int rule_count, char* rules[]);
+
+  /// @return the number of files cleaned.
+ int cleaned_files_count() const {
+ return cleaned_files_count_;
+ }
+
+ /// @return whether the cleaner is in verbose mode.
+ bool IsVerbose() const {
+ return (config_.verbosity != BuildConfig::QUIET
+ && (config_.verbosity == BuildConfig::VERBOSE || config_.dry_run));
+ }
+
+ private:
+ /// Remove the file @a path.
+ /// @return whether the file has been removed.
+ int RemoveFile(const string& path);
+ /// @returns whether the file @a path exists.
+ bool FileExists(const string& path);
+ void Report(const string& path);
+
+  /// Remove the given @a path file only if it has not already been removed.
+ void Remove(const string& path);
+ /// @return whether the given @a path has already been removed.
+ bool IsAlreadyRemoved(const string& path);
+ /// Remove the depfile and rspfile for an Edge.
+ void RemoveEdgeFiles(Edge* edge);
+
+ /// Helper recursive method for CleanTarget().
+ void DoCleanTarget(Node* target);
+ void PrintHeader();
+ void PrintFooter();
+ void DoCleanRule(const Rule* rule);
+ void Reset();
+
+ State* state_;
+ const BuildConfig& config_;
+ set<string> removed_;
+ set<Node*> cleaned_;
+ int cleaned_files_count_;
+ DiskInterface* disk_interface_;
+ int status_;
+};
+
+#endif // NINJA_CLEAN_H_
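A minimal usage sketch of the interface declared above, not part of the import; it assumes a State that has already been populated from a manifest and uses only calls visible in this header and in clean.cc:

    State state;                                // assumed already populated
    BuildConfig config;
    config.verbosity = BuildConfig::QUIET;
    Cleaner cleaner(&state, config);            // real disk interface
    int status = cleaner.CleanAll();            // non-zero on error
    printf("%d files cleaned\n", cleaner.cleaned_files_count());

The tests that follow use the three-argument constructor instead, injecting a VirtualFileSystem so nothing is deleted from the real disk.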
diff --git a/ninja/src/clean_test.cc b/ninja/src/clean_test.cc
new file mode 100644
index 00000000000..04cff734aa7
--- /dev/null
+++ b/ninja/src/clean_test.cc
@@ -0,0 +1,374 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "clean.h"
+#include "build.h"
+
+#include "test.h"
+
+struct CleanTest : public StateTestWithBuiltinRules {
+ VirtualFileSystem fs_;
+ BuildConfig config_;
+ virtual void SetUp() {
+ config_.verbosity = BuildConfig::QUIET;
+ }
+};
+
+TEST_F(CleanTest, CleanAll) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(4u, fs_.files_removed_.size());
+
+ // Check they are removed.
+ EXPECT_EQ(0, fs_.Stat("in1"));
+ EXPECT_EQ(0, fs_.Stat("out1"));
+ EXPECT_EQ(0, fs_.Stat("in2"));
+ EXPECT_EQ(0, fs_.Stat("out2"));
+ fs_.files_removed_.clear();
+
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanAllDryRun) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ config_.dry_run = true;
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+
+ // Check they are not removed.
+ EXPECT_NE(0, fs_.Stat("in1"));
+ EXPECT_NE(0, fs_.Stat("out1"));
+ EXPECT_NE(0, fs_.Stat("in2"));
+ EXPECT_NE(0, fs_.Stat("out2"));
+ fs_.files_removed_.clear();
+
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(4, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanTarget) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+
+  // Check that out1 and its input in1 were removed, but in2 and out2 were not.
+ EXPECT_EQ(0, fs_.Stat("in1"));
+ EXPECT_EQ(0, fs_.Stat("out1"));
+ EXPECT_NE(0, fs_.Stat("in2"));
+ EXPECT_NE(0, fs_.Stat("out2"));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanTargetDryRun) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ config_.dry_run = true;
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+
+  // Check they are not removed.
+ EXPECT_NE(0, fs_.Stat("in1"));
+ EXPECT_NE(0, fs_.Stat("out1"));
+ EXPECT_NE(0, fs_.Stat("in2"));
+ EXPECT_NE(0, fs_.Stat("out2"));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRule) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cat_e\n"
+" command = cat -e $in > $out\n"
+"build in1: cat_e src1\n"
+"build out1: cat in1\n"
+"build in2: cat_e src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+
+  // Check that in1 and in2 (the cat_e outputs) are removed, but not out1/out2.
+ EXPECT_EQ(0, fs_.Stat("in1"));
+ EXPECT_NE(0, fs_.Stat("out1"));
+ EXPECT_EQ(0, fs_.Stat("in2"));
+ EXPECT_NE(0, fs_.Stat("out2"));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(0, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRuleDryRun) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cat_e\n"
+" command = cat -e $in > $out\n"
+"build in1: cat_e src1\n"
+"build out1: cat in1\n"
+"build in2: cat_e src2\n"
+"build out2: cat in2\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ config_.dry_run = true;
+ Cleaner cleaner(&state_, config_, &fs_);
+
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+
+  // Check they are not removed.
+ EXPECT_NE(0, fs_.Stat("in1"));
+ EXPECT_NE(0, fs_.Stat("out1"));
+ EXPECT_NE(0, fs_.Stat("in2"));
+ EXPECT_NE(0, fs_.Stat("out2"));
+ fs_.files_removed_.clear();
+
+ ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(0u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRuleGenerator) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule regen\n"
+" command = cat $in > $out\n"
+" generator = 1\n"
+"build out1: cat in1\n"
+"build out2: regen in2\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(1, cleaner.cleaned_files_count());
+ EXPECT_EQ(1u, fs_.files_removed_.size());
+
+ fs_.Create("out1", "");
+
+ EXPECT_EQ(0, cleaner.CleanAll(/*generator=*/true));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDepFile) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"build out1: cc in1\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out1.d", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDepFileOnCleanTarget) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"build out1: cc in1\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out1.d", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanDepFileOnCleanRule) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" depfile = $out.d\n"
+"build out1: cc in1\n"));
+ fs_.Create("out1", "");
+ fs_.Create("out1.d", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanRule("cc"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRspFile) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cc\n"
+" command = cc $in > $out\n"
+" rspfile = $rspfile\n"
+" rspfile_content=$in\n"
+"build out1: cc in1\n"
+" rspfile = cc1.rsp\n"
+" rspfile_content=$in\n"));
+ fs_.Create("out1", "");
+ fs_.Create("cc1.rsp", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_EQ(2u, fs_.files_removed_.size());
+}
+
+TEST_F(CleanTest, CleanRsp) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule cat_rsp \n"
+" command = cat $rspfile > $out\n"
+" rspfile = $rspfile\n"
+" rspfile_content = $in\n"
+"build in1: cat src1\n"
+"build out1: cat in1\n"
+"build in2: cat_rsp src2\n"
+" rspfile=in2.rsp\n"
+" rspfile_content=$in\n"
+"build out2: cat_rsp in2\n"
+" rspfile=out2.rsp\n"
+" rspfile_content=$in\n"));
+ fs_.Create("in1", "");
+ fs_.Create("out1", "");
+ fs_.Create("in2.rsp", "");
+ fs_.Create("out2.rsp", "");
+ fs_.Create("in2", "");
+ fs_.Create("out2", "");
+
+ Cleaner cleaner(&state_, config_, &fs_);
+ ASSERT_EQ(0, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("out1"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanTarget("in2"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ ASSERT_EQ(0, cleaner.CleanRule("cat_rsp"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+
+ EXPECT_EQ(6u, fs_.files_removed_.size());
+
+ // Check they are removed.
+ EXPECT_EQ(0, fs_.Stat("in1"));
+ EXPECT_EQ(0, fs_.Stat("out1"));
+ EXPECT_EQ(0, fs_.Stat("in2"));
+ EXPECT_EQ(0, fs_.Stat("out2"));
+ EXPECT_EQ(0, fs_.Stat("in2.rsp"));
+ EXPECT_EQ(0, fs_.Stat("out2.rsp"));
+
+ fs_.files_removed_.clear();
+}
+
+TEST_F(CleanTest, CleanFailure) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+ "build dir: cat src1\n"));
+ fs_.MakeDir("dir");
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_NE(0, cleaner.CleanAll());
+}
+
+TEST_F(CleanTest, CleanPhony) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build phony: phony t1 t2\n"
+"build t1: cat\n"
+"build t2: cat\n"));
+
+ fs_.Create("phony", "");
+ fs_.Create("t1", "");
+ fs_.Create("t2", "");
+
+ // Check that CleanAll does not remove "phony".
+ Cleaner cleaner(&state_, config_, &fs_);
+ EXPECT_EQ(0, cleaner.CleanAll());
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_NE(0, fs_.Stat("phony"));
+
+ fs_.Create("t1", "");
+ fs_.Create("t2", "");
+
+ // Check that CleanTarget does not remove "phony".
+ EXPECT_EQ(0, cleaner.CleanTarget("phony"));
+ EXPECT_EQ(2, cleaner.cleaned_files_count());
+ EXPECT_NE(0, fs_.Stat("phony"));
+}
diff --git a/ninja/src/depfile_parser.cc b/ninja/src/depfile_parser.cc
new file mode 100644
index 00000000000..5a30c6b1bb8
--- /dev/null
+++ b/ninja/src/depfile_parser.cc
@@ -0,0 +1,219 @@
+/* Generated by re2c 0.13.5 */
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "depfile_parser.h"
+
+// A note on backslashes in Makefiles, from reading the docs:
+// Backslash-newline is the line continuation character.
+// Backslash-# escapes a # (otherwise meaningful as a comment start).
+// Backslash-% escapes a % (otherwise meaningful as a special).
+// Finally, quoting the GNU manual, "Backslashes that are not in danger
+// of quoting ‘%’ characters go unmolested."
+// How do you end a line with a backslash? The netbsd Make docs suggest
+// reading the result of a shell command echoing a backslash!
+//
+// Rather than implement all of the above, we do a simpler thing here:
+// Backslashes escape a set of characters (see "escapes" defined below),
+// otherwise they are passed through verbatim.
+// If anyone actually has depfiles that rely on the more complicated
+// behavior we can adjust this.
+bool DepfileParser::Parse(string* content, string* err) {
+ // in: current parser input point.
+ // end: end of input.
+ // parsing_targets: whether we are parsing targets or dependencies.
+ char* in = &(*content)[0];
+ char* end = in + content->size();
+ bool parsing_targets = true;
+ while (in < end) {
+ // out: current output point (typically same as in, but can fall behind
+ // as we de-escape backslashes).
+ char* out = in;
+ // filename: start of the current parsed filename.
+ char* filename = out;
+ for (;;) {
+ // start: beginning of the current parsed span.
+ const char* start = in;
+
+ {
+ char yych;
+ static const unsigned char yybm[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 0, 0,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 0, 0, 0, 0, 128,
+ 0, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 0, 0, 0, 128, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ yych = *in;
+ if (yych <= '[') {
+ if (yych <= '$') {
+ if (yych <= 0x00) goto yy7;
+ if (yych <= ' ') goto yy9;
+ if (yych <= '#') goto yy6;
+ goto yy4;
+ } else {
+ if (yych <= '=') goto yy6;
+ if (yych <= '?') goto yy9;
+ if (yych <= 'Z') goto yy6;
+ goto yy9;
+ }
+ } else {
+ if (yych <= '`') {
+ if (yych <= '\\') goto yy2;
+ if (yych == '_') goto yy6;
+ goto yy9;
+ } else {
+ if (yych <= 'z') goto yy6;
+ if (yych == '~') goto yy6;
+ goto yy9;
+ }
+ }
+yy2:
+ ++in;
+ if ((yych = *in) <= '#') {
+ if (yych <= '\n') {
+ if (yych <= 0x00) goto yy3;
+ if (yych <= '\t') goto yy14;
+ } else {
+ if (yych == ' ') goto yy16;
+ if (yych <= '"') goto yy14;
+ goto yy16;
+ }
+ } else {
+ if (yych <= 'Z') {
+ if (yych == '*') goto yy16;
+ goto yy14;
+ } else {
+ if (yych <= '\\') goto yy16;
+ if (yych == '|') goto yy16;
+ goto yy14;
+ }
+ }
+yy3:
+ {
+ // For any other character (e.g. whitespace), swallow it here,
+ // allowing the outer logic to loop around again.
+ break;
+ }
+yy4:
+ ++in;
+ if ((yych = *in) == '$') goto yy12;
+ goto yy11;
+yy5:
+ {
+ // Got a span of plain text.
+ int len = (int)(in - start);
+ // Need to shift it over if we're overwriting backslashes.
+ if (out < start)
+ memmove(out, start, len);
+ out += len;
+ continue;
+ }
+yy6:
+ yych = *++in;
+ goto yy11;
+yy7:
+ ++in;
+ {
+ break;
+ }
+yy9:
+ yych = *++in;
+ goto yy3;
+yy10:
+ ++in;
+ yych = *in;
+yy11:
+ if (yybm[0+yych] & 128) {
+ goto yy10;
+ }
+ goto yy5;
+yy12:
+ ++in;
+ if (yybm[0+(yych = *in)] & 128) {
+ goto yy10;
+ }
+ {
+ // De-escape dollar character.
+ *out++ = '$';
+ continue;
+ }
+yy14:
+ ++in;
+ {
+ // Let backslash before other characters through verbatim.
+ *out++ = '\\';
+ *out++ = yych;
+ continue;
+ }
+yy16:
+ ++in;
+ {
+ // De-escape backslashed character.
+ *out++ = yych;
+ continue;
+ }
+ }
+
+ }
+
+ int len = (int)(out - filename);
+ const bool is_target = parsing_targets;
+ if (len > 0 && filename[len - 1] == ':') {
+ len--; // Strip off trailing colon, if any.
+ parsing_targets = false;
+ }
+
+ if (len == 0)
+ continue;
+
+ if (!is_target) {
+ ins_.push_back(StringPiece(filename, len));
+ } else if (!out_.str_) {
+ out_ = StringPiece(filename, len);
+ } else if (out_ != StringPiece(filename, len)) {
+ *err = "depfile has multiple output paths.";
+ return false;
+ }
+ }
+ return true;
+}
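
As a quick illustration of the escaping rules described in the comment at the
top of depfile_parser.cc, here is a standalone sketch (illustrative only, not
part of the imported sources; the file names and the depfile string are made
up) that feeds a small depfile through DepfileParser::Parse and prints the
result, using only the interface declared in depfile_parser.h:

    #include <cstdio>
    #include <string>

    #include "depfile_parser.h"

    int main() {
      // "a\ b.h" escapes the space; '!' is not in the escape set, so the
      // backslash before it is passed through verbatim.
      std::string content = "out.o: a\\ b.h \\!weird.h\n";
      std::string err;
      DepfileParser parser;
      if (!parser.Parse(&content, &err)) {  // mutates |content| in place
        fprintf(stderr, "parse error: %s\n", err.c_str());
        return 1;
      }
      printf("target: %s\n", parser.out_.AsString().c_str());       // "out.o"
      for (size_t i = 0; i < parser.ins_.size(); ++i)
        printf("input:  %s\n", parser.ins_[i].AsString().c_str());  // "a b.h", "\!weird.h"
      return 0;
    }
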
diff --git a/ninja/src/depfile_parser.h b/ninja/src/depfile_parser.h
new file mode 100644
index 00000000000..1e6ebb57950
--- /dev/null
+++ b/ninja/src/depfile_parser.h
@@ -0,0 +1,35 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DEPFILE_PARSER_H_
+#define NINJA_DEPFILE_PARSER_H_
+
+#include <string>
+#include <vector>
+using namespace std;
+
+#include "string_piece.h"
+
+/// Parser for the dependency information emitted by gcc's -M flags.
+struct DepfileParser {
+ /// Parse an input file. Input must be NUL-terminated.
+ /// Warning: may mutate the content in-place and parsed StringPieces are
+ /// pointers within it.
+ bool Parse(string* content, string* err);
+
+ StringPiece out_;
+ vector<StringPiece> ins_;
+};
+
+#endif // NINJA_DEPFILE_PARSER_H_
diff --git a/ninja/src/depfile_parser.in.cc b/ninja/src/depfile_parser.in.cc
new file mode 100644
index 00000000000..cf24a09cc20
--- /dev/null
+++ b/ninja/src/depfile_parser.in.cc
@@ -0,0 +1,116 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "depfile_parser.h"
+
+// A note on backslashes in Makefiles, from reading the docs:
+// Backslash-newline is the line continuation character.
+// Backslash-# escapes a # (otherwise meaningful as a comment start).
+// Backslash-% escapes a % (otherwise meaningful as a special).
+// Finally, quoting the GNU manual, "Backslashes that are not in danger
+// of quoting ‘%’ characters go unmolested."
+// How do you end a line with a backslash? The netbsd Make docs suggest
+// reading the result of a shell command echoing a backslash!
+//
+// Rather than implement all of the above, we do a simpler thing here:
+// Backslashes escape a set of characters (see "escapes" defined below),
+// otherwise they are passed through verbatim.
+// If anyone actually has depfiles that rely on the more complicated
+// behavior we can adjust this.
+bool DepfileParser::Parse(string* content, string* err) {
+ // in: current parser input point.
+ // end: end of input.
+ // parsing_targets: whether we are parsing targets or dependencies.
+ char* in = &(*content)[0];
+ char* end = in + content->size();
+ bool parsing_targets = true;
+ while (in < end) {
+ // out: current output point (typically same as in, but can fall behind
+ // as we de-escape backslashes).
+ char* out = in;
+ // filename: start of the current parsed filename.
+ char* filename = out;
+ for (;;) {
+ // start: beginning of the current parsed span.
+ const char* start = in;
+ /*!re2c
+ re2c:define:YYCTYPE = "char";
+ re2c:define:YYCURSOR = in;
+ re2c:define:YYLIMIT = end;
+
+ re2c:yyfill:enable = 0;
+
+ re2c:indent:top = 2;
+ re2c:indent:string = " ";
+
+ nul = "\000";
+ escape = [ \\#*[|];
+
+ '\\' escape {
+ // De-escape backslashed character.
+ *out++ = yych;
+ continue;
+ }
+ '$$' {
+ // De-escape dollar character.
+ *out++ = '$';
+ continue;
+ }
+ '\\' [^\000\n] {
+ // Let backslash before other characters through verbatim.
+ *out++ = '\\';
+ *out++ = yych;
+ continue;
+ }
+ [a-zA-Z0-9+,/_:.~()@=-!]+ {
+ // Got a span of plain text.
+ int len = (int)(in - start);
+ // Need to shift it over if we're overwriting backslashes.
+ if (out < start)
+ memmove(out, start, len);
+ out += len;
+ continue;
+ }
+ nul {
+ break;
+ }
+ [^] {
+ // For any other character (e.g. whitespace), swallow it here,
+ // allowing the outer logic to loop around again.
+ break;
+ }
+ */
+ }
+
+ int len = (int)(out - filename);
+ const bool is_target = parsing_targets;
+ if (len > 0 && filename[len - 1] == ':') {
+ len--; // Strip off trailing colon, if any.
+ parsing_targets = false;
+ }
+
+ if (len == 0)
+ continue;
+
+ if (!is_target) {
+ ins_.push_back(StringPiece(filename, len));
+ } else if (!out_.str_) {
+ out_ = StringPiece(filename, len);
+ } else if (out_ != StringPiece(filename, len)) {
+ *err = "depfile has multiple output paths.";
+ return false;
+ }
+ }
+ return true;
+}
diff --git a/ninja/src/depfile_parser_test.cc b/ninja/src/depfile_parser_test.cc
new file mode 100644
index 00000000000..0f6771a9901
--- /dev/null
+++ b/ninja/src/depfile_parser_test.cc
@@ -0,0 +1,139 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "depfile_parser.h"
+
+#include <gtest/gtest.h>
+
+struct DepfileParserTest : public testing::Test {
+ bool Parse(const char* input, string* err);
+
+ DepfileParser parser_;
+ string input_;
+};
+
+bool DepfileParserTest::Parse(const char* input, string* err) {
+ input_ = input;
+ return parser_.Parse(&input_, err);
+}
+
+TEST_F(DepfileParserTest, Basic) {
+ string err;
+ EXPECT_TRUE(Parse(
+"build/ninja.o: ninja.cc ninja.h eval_env.h manifest_parser.h\n",
+ &err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("build/ninja.o", parser_.out_.AsString());
+ EXPECT_EQ(4u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, EarlyNewlineAndWhitespace) {
+ string err;
+ EXPECT_TRUE(Parse(
+" \\\n"
+" out: in\n",
+ &err));
+ ASSERT_EQ("", err);
+}
+
+TEST_F(DepfileParserTest, Continuation) {
+ string err;
+ EXPECT_TRUE(Parse(
+"foo.o: \\\n"
+" bar.h baz.h\n",
+ &err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("foo.o", parser_.out_.AsString());
+ EXPECT_EQ(2u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, BackSlashes) {
+ string err;
+ EXPECT_TRUE(Parse(
+"Project\\Dir\\Build\\Release8\\Foo\\Foo.res : \\\n"
+" Dir\\Library\\Foo.rc \\\n"
+" Dir\\Library\\Version\\Bar.h \\\n"
+" Dir\\Library\\Foo.ico \\\n"
+" Project\\Thing\\Bar.tlb \\\n",
+ &err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("Project\\Dir\\Build\\Release8\\Foo\\Foo.res",
+ parser_.out_.AsString());
+ EXPECT_EQ(4u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, Spaces) {
+ string err;
+ EXPECT_TRUE(Parse(
+"a\\ bc\\ def: a\\ b c d",
+ &err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("a bc def",
+ parser_.out_.AsString());
+ ASSERT_EQ(3u, parser_.ins_.size());
+ EXPECT_EQ("a b",
+ parser_.ins_[0].AsString());
+ EXPECT_EQ("c",
+ parser_.ins_[1].AsString());
+ EXPECT_EQ("d",
+ parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, Escapes) {
+ // Put backslashes before a variety of characters, see which ones make
+ // it through.
+ string err;
+ EXPECT_TRUE(Parse(
+"\\!\\@\\#$$\\%\\^\\&\\\\",
+ &err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("\\!\\@#$\\%\\^\\&\\",
+ parser_.out_.AsString());
+ ASSERT_EQ(0u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, SpecialChars) {
+ // See filenames like istreambuf.iterator_op!= in
+ // https://github.com/google/libcxx/tree/master/test/iterators/stream.iterators/istreambuf.iterator/
+ string err;
+ EXPECT_TRUE(Parse(
+"C:/Program\\ Files\\ (x86)/Microsoft\\ crtdefs.h: \n"
+" en@quot.header~ t+t-x!=1",
+ &err));
+ ASSERT_EQ("", err);
+ EXPECT_EQ("C:/Program Files (x86)/Microsoft crtdefs.h",
+ parser_.out_.AsString());
+ ASSERT_EQ(2u, parser_.ins_.size());
+ EXPECT_EQ("en@quot.header~",
+ parser_.ins_[0].AsString());
+ EXPECT_EQ("t+t-x!=1",
+ parser_.ins_[1].AsString());
+}
+
+TEST_F(DepfileParserTest, UnifyMultipleOutputs) {
+ // check that multiple duplicate targets are properly unified
+ string err;
+ EXPECT_TRUE(Parse("foo foo: x y z", &err));
+ ASSERT_EQ(parser_.out_.AsString(), "foo");
+ ASSERT_EQ(parser_.ins_.size(), 3u);
+ EXPECT_EQ("x", parser_.ins_[0].AsString());
+ EXPECT_EQ("y", parser_.ins_[1].AsString());
+ EXPECT_EQ("z", parser_.ins_[2].AsString());
+}
+
+TEST_F(DepfileParserTest, RejectMultipleDifferentOutputs) {
+ // check that multiple different outputs are rejected by the parser
+ string err;
+ EXPECT_FALSE(Parse("foo bar: x y z", &err));
+}
diff --git a/ninja/src/deps_log.cc b/ninja/src/deps_log.cc
new file mode 100644
index 00000000000..ce9bf06b3f3
--- /dev/null
+++ b/ninja/src/deps_log.cc
@@ -0,0 +1,335 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "deps_log.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include "graph.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+
+// The version is stored as 4 bytes after the signature and also serves as a
+// byte order mark. Signature and version combined are 16 bytes long.
+const char kFileSignature[] = "# ninjadeps\n";
+const int kCurrentVersion = 1;
+
+// Since the size field is 2 bytes and the top bit marks deps entries, a single
+// record can be at most 32 kB. Set the buffer size to this and flush the file
+// buffer after every record to make sure records aren't written partially.
+const int kMaxBufferSize = 1 << 15;
+
+DepsLog::~DepsLog() {
+ Close();
+}
+
+bool DepsLog::OpenForWrite(const string& path, string* err) {
+ if (needs_recompaction_) {
+ Close();
+ if (!Recompact(path, err))
+ return false;
+ }
+
+ file_ = fopen(path.c_str(), "ab");
+ if (!file_) {
+ *err = strerror(errno);
+ return false;
+ }
+ setvbuf(file_, NULL, _IOFBF, kMaxBufferSize);
+ SetCloseOnExec(fileno(file_));
+
+ // Opening a file in append mode doesn't set the file pointer to the file's
+ // end on Windows. Do that explicitly.
+ fseek(file_, 0, SEEK_END);
+
+ if (ftell(file_) == 0) {
+ if (fwrite(kFileSignature, sizeof(kFileSignature) - 1, 1, file_) < 1) {
+ *err = strerror(errno);
+ return false;
+ }
+ if (fwrite(&kCurrentVersion, 4, 1, file_) < 1) {
+ *err = strerror(errno);
+ return false;
+ }
+ }
+ fflush(file_);
+
+ return true;
+}
+
+bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
+ const vector<Node*>& nodes) {
+ return RecordDeps(node, mtime, nodes.size(),
+ nodes.empty() ? NULL : (Node**)&nodes.front());
+}
+
+bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
+ int node_count, Node** nodes) {
+ // Track whether there's any new data to be recorded.
+ bool made_change = false;
+
+ // Assign ids to all nodes that are missing one.
+ if (node->id() < 0) {
+ RecordId(node);
+ made_change = true;
+ }
+ for (int i = 0; i < node_count; ++i) {
+ if (nodes[i]->id() < 0) {
+ RecordId(nodes[i]);
+ made_change = true;
+ }
+ }
+
+ // See if the new data is different than the existing data, if any.
+ if (!made_change) {
+ Deps* deps = GetDeps(node);
+ if (!deps ||
+ deps->mtime != mtime ||
+ deps->node_count != node_count) {
+ made_change = true;
+ } else {
+ for (int i = 0; i < node_count; ++i) {
+ if (deps->nodes[i] != nodes[i]) {
+ made_change = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Don't write anything if there's no new info.
+ if (!made_change)
+ return true;
+
+ // Update on-disk representation.
+ uint16_t size = 4 * (1 + 1 + (uint16_t)node_count);
+ size |= 0x8000; // Deps record: set high bit.
+ fwrite(&size, 2, 1, file_);
+ int id = node->id();
+ fwrite(&id, 4, 1, file_);
+ int timestamp = mtime;
+ fwrite(&timestamp, 4, 1, file_);
+ for (int i = 0; i < node_count; ++i) {
+ id = nodes[i]->id();
+ fwrite(&id, 4, 1, file_);
+ }
+ fflush(file_);
+
+ // Update in-memory representation.
+ Deps* deps = new Deps(mtime, node_count);
+ for (int i = 0; i < node_count; ++i)
+ deps->nodes[i] = nodes[i];
+ UpdateDeps(node->id(), deps);
+
+ return true;
+}
+
+void DepsLog::Close() {
+ if (file_)
+ fclose(file_);
+ file_ = NULL;
+}
+
+bool DepsLog::Load(const string& path, State* state, string* err) {
+ METRIC_RECORD(".ninja_deps load");
+ char buf[32 << 10];
+ FILE* f = fopen(path.c_str(), "rb");
+ if (!f) {
+ if (errno == ENOENT)
+ return true;
+ *err = strerror(errno);
+ return false;
+ }
+
+ bool valid_header = true;
+ int version = 0;
+ if (!fgets(buf, sizeof(buf), f) || fread(&version, 4, 1, f) < 1)
+ valid_header = false;
+ if (!valid_header || strcmp(buf, kFileSignature) != 0 ||
+ version != kCurrentVersion) {
+ *err = "bad deps log signature or version; starting over";
+ fclose(f);
+ unlink(path.c_str());
+ // Don't report this as a failure. An empty deps log will cause
+ // us to rebuild the outputs anyway.
+ return true;
+ }
+
+ long offset;
+ bool read_failed = false;
+ int unique_dep_record_count = 0;
+ int total_dep_record_count = 0;
+ for (;;) {
+ offset = ftell(f);
+
+ uint16_t size;
+ if (fread(&size, 2, 1, f) < 1) {
+ if (!feof(f))
+ read_failed = true;
+ break;
+ }
+ bool is_deps = (size >> 15) != 0;
+ size = size & 0x7FFF;
+
+ if (fread(buf, size, 1, f) < 1) {
+ read_failed = true;
+ break;
+ }
+
+ if (is_deps) {
+ assert(size % 4 == 0);
+ int* deps_data = reinterpret_cast<int*>(buf);
+ int out_id = deps_data[0];
+ int mtime = deps_data[1];
+ deps_data += 2;
+ int deps_count = (size / 4) - 2;
+
+ Deps* deps = new Deps(mtime, deps_count);
+ for (int i = 0; i < deps_count; ++i) {
+ assert(deps_data[i] < (int)nodes_.size());
+ assert(nodes_[deps_data[i]]);
+ deps->nodes[i] = nodes_[deps_data[i]];
+ }
+
+ total_dep_record_count++;
+ if (!UpdateDeps(out_id, deps))
+ ++unique_dep_record_count;
+ } else {
+ StringPiece path(buf, size);
+ Node* node = state->GetNode(path);
+ assert(node->id() < 0);
+ node->set_id(nodes_.size());
+ nodes_.push_back(node);
+ }
+ }
+
+ if (read_failed) {
+ // An error occurred while loading; try to recover by truncating the
+ // file to the last fully-read record.
+ if (ferror(f)) {
+ *err = strerror(ferror(f));
+ } else {
+ *err = "premature end of file";
+ }
+ fclose(f);
+
+ if (!Truncate(path.c_str(), offset, err))
+ return false;
+
+ // The truncate succeeded; we'll just report the load error as a
+ // warning because the build can proceed.
+ *err += "; recovering";
+ return true;
+ }
+
+ fclose(f);
+
+ // Rebuild the log if there are too many dead records.
+ int kMinCompactionEntryCount = 1000;
+ int kCompactionRatio = 3;
+ if (total_dep_record_count > kMinCompactionEntryCount &&
+ total_dep_record_count > unique_dep_record_count * kCompactionRatio) {
+ needs_recompaction_ = true;
+ }
+
+ return true;
+}
+
+DepsLog::Deps* DepsLog::GetDeps(Node* node) {
+ // Abort if the node has no id (never referenced in the deps) or if
+ // there's no deps recorded for the node.
+ if (node->id() < 0 || node->id() >= (int)deps_.size())
+ return NULL;
+ return deps_[node->id()];
+}
+
+bool DepsLog::Recompact(const string& path, string* err) {
+ METRIC_RECORD(".ninja_deps recompact");
+ printf("Recompacting deps...\n");
+
+ string temp_path = path + ".recompact";
+
+ // OpenForWrite() opens for append. Make sure it's not appending to a
+ // left-over file from a previous recompaction attempt that crashed somehow.
+ unlink(temp_path.c_str());
+
+ DepsLog new_log;
+ if (!new_log.OpenForWrite(temp_path, err))
+ return false;
+
+ // Clear all known ids so that new ones can be reassigned. The new indices
+ // will refer to the ordering in new_log, not in the current log.
+ for (vector<Node*>::iterator i = nodes_.begin(); i != nodes_.end(); ++i)
+ (*i)->set_id(-1);
+
+ // Write out all deps again.
+ for (int old_id = 0; old_id < (int)deps_.size(); ++old_id) {
+ Deps* deps = deps_[old_id];
+ if (!deps) continue; // If nodes_[old_id] is a leaf, it has no deps.
+
+ if (!new_log.RecordDeps(nodes_[old_id], deps->mtime,
+ deps->node_count, deps->nodes)) {
+ new_log.Close();
+ return false;
+ }
+ }
+
+ new_log.Close();
+
+ // All nodes now have ids that refer to new_log, so steal its data.
+ deps_.swap(new_log.deps_);
+ nodes_.swap(new_log.nodes_);
+
+ if (unlink(path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ if (rename(temp_path.c_str(), path.c_str()) < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+
+ return true;
+}
+
+bool DepsLog::UpdateDeps(int out_id, Deps* deps) {
+ if (out_id >= (int)deps_.size())
+ deps_.resize(out_id + 1);
+
+ bool delete_old = deps_[out_id] != NULL;
+ if (delete_old)
+ delete deps_[out_id];
+ deps_[out_id] = deps;
+ return delete_old;
+}
+
+bool DepsLog::RecordId(Node* node) {
+ uint16_t size = (uint16_t)node->path().size();
+ fwrite(&size, 2, 1, file_);
+ fwrite(node->path().data(), node->path().size(), 1, file_);
+ fflush(file_);
+
+ node->set_id(nodes_.size());
+ nodes_.push_back(node);
+
+ return true;
+}
diff --git a/ninja/src/deps_log.h b/ninja/src/deps_log.h
new file mode 100644
index 00000000000..de0fe639d14
--- /dev/null
+++ b/ninja/src/deps_log.h
@@ -0,0 +1,110 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DEPS_LOG_H_
+#define NINJA_DEPS_LOG_H_
+
+#include <string>
+#include <vector>
+using namespace std;
+
+#include <stdio.h>
+
+#include "timestamp.h"
+
+struct Node;
+struct State;
+
+/// As build commands run they can output extra dependency information
+/// (e.g. header dependencies for C source) dynamically. DepsLog collects
+/// that information at build time and uses it for subsequent builds.
+///
+/// The on-disk format is based on two primary design constraints:
+/// - it must be written to as a stream (during the build, which may be
+/// interrupted);
+/// - it can be read all at once on startup. (Alternative designs, where
+/// it contains indexing information, were considered and discarded as
+///   too complicated to implement; if the file is small then reading it
+/// fully on startup is acceptable.)
+/// Here are some stats from the Windows Chrome dependency files, to
+/// help guide the design space. The total text in the files sums to
+/// 90mb so some compression is warranted to keep load-time fast.
+/// There's about 10k files worth of dependencies that reference about
+/// 40k total paths totalling 2mb of unique strings.
+///
+/// Based on these stats, here's the current design.
+/// The file is structured as version header followed by a sequence of records.
+/// Each record is either a path string or a dependency list.
+/// Numbering the path strings in file order gives them dense integer ids.
+/// A dependency list maps an output id to a list of input ids.
+///
+/// Concretely, a record is:
+/// two bytes record length, high bit indicates record type
+/// (implies max record length 32k)
+/// path records contain just the string name of the path
+/// dependency records are an array of 4-byte integers
+/// [output path id, output path mtime, input path id, input path id...]
+/// (The mtime is compared against the on-disk output path mtime
+/// to verify the stored data is up-to-date.)
+/// If two records reference the same output the latter one in the file
+/// wins, allowing updates to just be appended to the file. A separate
+/// repacking step can run occasionally to remove dead records.
+struct DepsLog {
+ DepsLog() : needs_recompaction_(false), file_(NULL) {}
+ ~DepsLog();
+
+ // Writing (build-time) interface.
+ bool OpenForWrite(const string& path, string* err);
+ bool RecordDeps(Node* node, TimeStamp mtime, const vector<Node*>& nodes);
+ bool RecordDeps(Node* node, TimeStamp mtime, int node_count, Node** nodes);
+ void Close();
+
+ // Reading (startup-time) interface.
+ struct Deps {
+ Deps(int mtime, int node_count)
+ : mtime(mtime), node_count(node_count), nodes(new Node*[node_count]) {}
+ ~Deps() { delete [] nodes; }
+ int mtime;
+ int node_count;
+ Node** nodes;
+ };
+ bool Load(const string& path, State* state, string* err);
+ Deps* GetDeps(Node* node);
+
+ /// Rewrite the known log entries, throwing away old data.
+ bool Recompact(const string& path, string* err);
+
+ /// Used for tests.
+ const vector<Node*>& nodes() const { return nodes_; }
+ const vector<Deps*>& deps() const { return deps_; }
+
+ private:
+ // Updates the in-memory representation. Takes ownership of |deps|.
+ // Returns true if a prior deps record was deleted.
+ bool UpdateDeps(int out_id, Deps* deps);
+ // Write a node name record, assigning it an id.
+ bool RecordId(Node* node);
+
+ bool needs_recompaction_;
+ FILE* file_;
+
+ /// Maps id -> Node.
+ vector<Node*> nodes_;
+ /// Maps id -> deps of that id.
+ vector<Deps*> deps_;
+
+ friend struct DepsLogTest;
+};
+
+#endif // NINJA_DEPS_LOG_H_
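
To make the record layout documented in the header comment above concrete,
here is a minimal sketch (illustrative only, not part of the imported sources;
the function name is invented) that serializes one dependency record in the
same byte layout that DepsLog::RecordDeps in deps_log.cc writes:

    #include <stdint.h>
    #include <stdio.h>

    // Sketch: write one deps record. |out_id| and |input_ids| are the dense
    // integer ids that path records assign in file order.
    static void WriteDepsRecord(FILE* f, int out_id, int mtime,
                                const int* input_ids, int input_count) {
      // 2-byte record size; the high bit marks a deps record rather than a
      // path record, which is what caps a single record at 32 kB.
      uint16_t size = 4 * (1 + 1 + (uint16_t)input_count);
      size |= 0x8000;
      fwrite(&size, 2, 1, f);
      // Payload: 4-byte output id, 4-byte output mtime, then one 4-byte id
      // per input.
      fwrite(&out_id, 4, 1, f);
      fwrite(&mtime, 4, 1, f);
      for (int i = 0; i < input_count; ++i)
        fwrite(&input_ids[i], 4, 1, f);
    }

Appending a second record for the same output id simply supersedes the first
when the log is loaded, which is why recompaction is only an occasional
space-saving step rather than a correctness requirement.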
diff --git a/ninja/src/deps_log_test.cc b/ninja/src/deps_log_test.cc
new file mode 100644
index 00000000000..3b329635519
--- /dev/null
+++ b/ninja/src/deps_log_test.cc
@@ -0,0 +1,384 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "deps_log.h"
+
+#include "graph.h"
+#include "util.h"
+#include "test.h"
+
+namespace {
+
+const char kTestFilename[] = "DepsLogTest-tempfile";
+
+struct DepsLogTest : public testing::Test {
+ virtual void SetUp() {
+ // In case a crashing test left a stale file behind.
+ unlink(kTestFilename);
+ }
+ virtual void TearDown() {
+ unlink(kTestFilename);
+ }
+};
+
+TEST_F(DepsLogTest, WriteRead) {
+ State state1;
+ DepsLog log1;
+ string err;
+ EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ {
+ vector<Node*> deps;
+ deps.push_back(state1.GetNode("foo.h"));
+ deps.push_back(state1.GetNode("bar.h"));
+ log1.RecordDeps(state1.GetNode("out.o"), 1, deps);
+
+ deps.clear();
+ deps.push_back(state1.GetNode("foo.h"));
+ deps.push_back(state1.GetNode("bar2.h"));
+ log1.RecordDeps(state1.GetNode("out2.o"), 2, deps);
+
+ DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o"));
+ ASSERT_TRUE(log_deps);
+ ASSERT_EQ(1, log_deps->mtime);
+ ASSERT_EQ(2, log_deps->node_count);
+ ASSERT_EQ("foo.h", log_deps->nodes[0]->path());
+ ASSERT_EQ("bar.h", log_deps->nodes[1]->path());
+ }
+
+ log1.Close();
+
+ State state2;
+ DepsLog log2;
+ EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err));
+ ASSERT_EQ("", err);
+
+ ASSERT_EQ(log1.nodes().size(), log2.nodes().size());
+ for (int i = 0; i < (int)log1.nodes().size(); ++i) {
+ Node* node1 = log1.nodes()[i];
+ Node* node2 = log2.nodes()[i];
+ ASSERT_EQ(i, node1->id());
+ ASSERT_EQ(node1->id(), node2->id());
+ }
+
+ // Spot-check the entries in log2.
+ DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out2.o"));
+ ASSERT_TRUE(log_deps);
+ ASSERT_EQ(2, log_deps->mtime);
+ ASSERT_EQ(2, log_deps->node_count);
+ ASSERT_EQ("foo.h", log_deps->nodes[0]->path());
+ ASSERT_EQ("bar2.h", log_deps->nodes[1]->path());
+}
+
+// Verify that adding the same deps twice doesn't grow the file.
+TEST_F(DepsLogTest, DoubleEntry) {
+ // Write some deps to the file and grab its size.
+ int file_size;
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar.h"));
+ log.RecordDeps(state.GetNode("out.o"), 1, deps);
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size = (int)st.st_size;
+ ASSERT_GT(file_size, 0);
+ }
+
+  // Now reload the file, and re-add the same deps.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar.h"));
+ log.RecordDeps(state.GetNode("out.o"), 1, deps);
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ int file_size_2 = (int)st.st_size;
+ ASSERT_EQ(file_size, file_size_2);
+ }
+}
+
+// Verify that adding the new deps works and can be compacted away.
+TEST_F(DepsLogTest, Recompact) {
+ // Write some deps to the file and grab its size.
+ int file_size;
+ {
+ State state;
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar.h"));
+ log.RecordDeps(state.GetNode("out.o"), 1, deps);
+
+ deps.clear();
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("baz.h"));
+ log.RecordDeps(state.GetNode("other_out.o"), 1, deps);
+
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size = (int)st.st_size;
+ ASSERT_GT(file_size, 0);
+ }
+
+  // Now reload the file, and add slightly different deps.
+ int file_size_2;
+ {
+ State state;
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ log.RecordDeps(state.GetNode("out.o"), 1, deps);
+ log.Close();
+
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ file_size_2 = (int)st.st_size;
+ // The file should grow to record the new deps.
+ ASSERT_GT(file_size_2, file_size);
+ }
+
+ // Now reload the file, verify the new deps have replaced the old, then
+ // recompact.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+
+ Node* out = state.GetNode("out.o");
+ DepsLog::Deps* deps = log.GetDeps(out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(1, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+
+ Node* other_out = state.GetNode("other_out.o");
+ deps = log.GetDeps(other_out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(2, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ("baz.h", deps->nodes[1]->path());
+
+ ASSERT_TRUE(log.Recompact(kTestFilename, &err));
+
+ // The in-memory deps graph should still be valid after recompaction.
+ deps = log.GetDeps(out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(1, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ(out, log.nodes()[out->id()]);
+
+ deps = log.GetDeps(other_out);
+ ASSERT_TRUE(deps);
+ ASSERT_EQ(1, deps->mtime);
+ ASSERT_EQ(2, deps->node_count);
+ ASSERT_EQ("foo.h", deps->nodes[0]->path());
+ ASSERT_EQ("baz.h", deps->nodes[1]->path());
+ ASSERT_EQ(other_out, log.nodes()[other_out->id()]);
+
+ // The file should have shrunk a bit for the smaller deps.
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ int file_size_3 = (int)st.st_size;
+ ASSERT_LT(file_size_3, file_size_2);
+ }
+}
+
+// Verify that invalid file headers cause a new build.
+TEST_F(DepsLogTest, InvalidHeader) {
+ const char *kInvalidHeaders[] = {
+ "", // Empty file.
+ "# ninjad", // Truncated first line.
+ "# ninjadeps\n", // No version int.
+ "# ninjadeps\n\001\002", // Truncated version int.
+ "# ninjadeps\n\001\002\003\004" // Invalid version int.
+ };
+ for (size_t i = 0; i < sizeof(kInvalidHeaders) / sizeof(kInvalidHeaders[0]);
+ ++i) {
+ FILE* deps_log = fopen(kTestFilename, "wb");
+ ASSERT_TRUE(deps_log != NULL);
+ ASSERT_EQ(
+ strlen(kInvalidHeaders[i]),
+ fwrite(kInvalidHeaders[i], 1, strlen(kInvalidHeaders[i]), deps_log));
+    ASSERT_EQ(0, fclose(deps_log));
+
+ string err;
+ DepsLog log;
+ State state;
+ ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
+ EXPECT_EQ("bad deps log signature or version; starting over", err);
+ }
+}
+
+// Simulate what happens when loading a truncated log file.
+TEST_F(DepsLogTest, Truncated) {
+ // Create a file with some entries.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar.h"));
+ log.RecordDeps(state.GetNode("out.o"), 1, deps);
+
+ deps.clear();
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar2.h"));
+ log.RecordDeps(state.GetNode("out2.o"), 2, deps);
+
+ log.Close();
+ }
+
+ // Get the file size.
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+
+ // Try reloading at truncated sizes.
+ // Track how many nodes/deps were found; they should decrease with
+ // smaller sizes.
+ int node_count = 5;
+ int deps_count = 2;
+ for (int size = (int)st.st_size; size > 0; --size) {
+ string err;
+ ASSERT_TRUE(Truncate(kTestFilename, size, &err));
+
+ State state;
+ DepsLog log;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+ if (!err.empty()) {
+ // At some point the log will be so short as to be unparseable.
+ break;
+ }
+
+ ASSERT_GE(node_count, (int)log.nodes().size());
+ node_count = log.nodes().size();
+
+ // Count how many non-NULL deps entries there are.
+ int new_deps_count = 0;
+ for (vector<DepsLog::Deps*>::const_iterator i = log.deps().begin();
+ i != log.deps().end(); ++i) {
+ if (*i)
+ ++new_deps_count;
+ }
+ ASSERT_GE(deps_count, new_deps_count);
+ deps_count = new_deps_count;
+ }
+}
+
+// Run the truncation-recovery logic.
+TEST_F(DepsLogTest, TruncatedRecovery) {
+ // Create a file with some entries.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar.h"));
+ log.RecordDeps(state.GetNode("out.o"), 1, deps);
+
+ deps.clear();
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar2.h"));
+ log.RecordDeps(state.GetNode("out2.o"), 2, deps);
+
+ log.Close();
+ }
+
+ // Shorten the file, corrupting the last record.
+ struct stat st;
+ ASSERT_EQ(0, stat(kTestFilename, &st));
+ string err;
+ ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err));
+
+ // Load the file again, add an entry.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+ ASSERT_EQ("premature end of file; recovering", err);
+ err.clear();
+
+ // The truncated entry should've been discarded.
+ EXPECT_EQ(NULL, log.GetDeps(state.GetNode("out2.o")));
+
+ EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
+ ASSERT_EQ("", err);
+
+ // Add a new entry.
+ vector<Node*> deps;
+ deps.push_back(state.GetNode("foo.h"));
+ deps.push_back(state.GetNode("bar2.h"));
+ log.RecordDeps(state.GetNode("out2.o"), 3, deps);
+
+ log.Close();
+ }
+
+ // Load the file a third time to verify appending after a mangled
+ // entry doesn't break things.
+ {
+ State state;
+ DepsLog log;
+ string err;
+ EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
+
+    // The re-added entry for out2.o should be present.
+ DepsLog::Deps* deps = log.GetDeps(state.GetNode("out2.o"));
+ ASSERT_TRUE(deps);
+ }
+}
+
+} // anonymous namespace
diff --git a/ninja/src/disk_interface.cc b/ninja/src/disk_interface.cc
new file mode 100644
index 00000000000..ee3e99a3b98
--- /dev/null
+++ b/ninja/src/disk_interface.cc
@@ -0,0 +1,177 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "disk_interface.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#include <direct.h> // _mkdir
+#endif
+
+#include "util.h"
+
+namespace {
+
+string DirName(const string& path) {
+#ifdef _WIN32
+ const char kPathSeparator = '\\';
+#else
+ const char kPathSeparator = '/';
+#endif
+
+ string::size_type slash_pos = path.rfind(kPathSeparator);
+ if (slash_pos == string::npos)
+ return string(); // Nothing to do.
+ while (slash_pos > 0 && path[slash_pos - 1] == kPathSeparator)
+ --slash_pos;
+ return path.substr(0, slash_pos);
+}
+
+int MakeDir(const string& path) {
+#ifdef _WIN32
+ return _mkdir(path.c_str());
+#else
+ return mkdir(path.c_str(), 0777);
+#endif
+}
+
+} // namespace
+
+// DiskInterface ---------------------------------------------------------------
+
+bool DiskInterface::MakeDirs(const string& path) {
+ string dir = DirName(path);
+ if (dir.empty())
+ return true; // Reached root; assume it's there.
+ TimeStamp mtime = Stat(dir);
+ if (mtime < 0)
+ return false; // Error.
+ if (mtime > 0)
+ return true; // Exists already; we're done.
+
+ // Directory doesn't exist. Try creating its parent first.
+ bool success = MakeDirs(dir);
+ if (!success)
+ return false;
+ return MakeDir(dir);
+}
+
+// RealDiskInterface -----------------------------------------------------------
+
+TimeStamp RealDiskInterface::Stat(const string& path) {
+#ifdef _WIN32
+ // MSDN: "Naming Files, Paths, and Namespaces"
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+ if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) {
+ if (!quiet_) {
+ Error("Stat(%s): Filename longer than %i characters",
+ path.c_str(), MAX_PATH);
+ }
+ return -1;
+ }
+ WIN32_FILE_ATTRIBUTE_DATA attrs;
+ if (!GetFileAttributesEx(path.c_str(), GetFileExInfoStandard, &attrs)) {
+ DWORD err = GetLastError();
+ if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND)
+ return 0;
+ if (!quiet_) {
+ Error("GetFileAttributesEx(%s): %s", path.c_str(),
+ GetLastErrorString().c_str());
+ }
+ return -1;
+ }
+ const FILETIME& filetime = attrs.ftLastWriteTime;
+ // FILETIME is in 100-nanosecond increments since the Windows epoch.
+ // We don't much care about epoch correctness but we do want the
+ // resulting value to fit in an integer.
+ uint64_t mtime = ((uint64_t)filetime.dwHighDateTime << 32) |
+ ((uint64_t)filetime.dwLowDateTime);
+ mtime /= 1000000000LL / 100; // 100ns -> s.
+ mtime -= 12622770400LL; // 1600 epoch -> 2000 epoch (subtract 400 years).
+ return (TimeStamp)mtime;
+#else
+ struct stat st;
+ if (stat(path.c_str(), &st) < 0) {
+ if (errno == ENOENT || errno == ENOTDIR)
+ return 0;
+ if (!quiet_) {
+ Error("stat(%s): %s", path.c_str(), strerror(errno));
+ }
+ return -1;
+ }
+ return st.st_mtime;
+#endif
+}
+
+bool RealDiskInterface::WriteFile(const string& path, const string& contents) {
+ FILE * fp = fopen(path.c_str(), "w");
+ if (fp == NULL) {
+ Error("WriteFile(%s): Unable to create file. %s",
+ path.c_str(), strerror(errno));
+ return false;
+ }
+
+ if (fwrite(contents.data(), 1, contents.length(), fp) < contents.length()) {
+ Error("WriteFile(%s): Unable to write to the file. %s",
+ path.c_str(), strerror(errno));
+ fclose(fp);
+ return false;
+ }
+
+ if (fclose(fp) == EOF) {
+ Error("WriteFile(%s): Unable to close the file. %s",
+ path.c_str(), strerror(errno));
+ return false;
+ }
+
+ return true;
+}
+
+bool RealDiskInterface::MakeDir(const string& path) {
+ if (::MakeDir(path) < 0) {
+ Error("mkdir(%s): %s", path.c_str(), strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+string RealDiskInterface::ReadFile(const string& path, string* err) {
+ string contents;
+ int ret = ::ReadFile(path, &contents, err);
+ if (ret == -ENOENT) {
+ // Swallow ENOENT.
+ err->clear();
+ }
+ return contents;
+}
+
+int RealDiskInterface::RemoveFile(const string& path) {
+ if (remove(path.c_str()) < 0) {
+ switch (errno) {
+ case ENOENT:
+ return 1;
+ default:
+ Error("remove(%s): %s", path.c_str(), strerror(errno));
+ return -1;
+ }
+ } else {
+ return 0;
+ }
+}
diff --git a/ninja/src/disk_interface.h b/ninja/src/disk_interface.h
new file mode 100644
index 00000000000..ff1e21c87fe
--- /dev/null
+++ b/ninja/src/disk_interface.h
@@ -0,0 +1,70 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_DISK_INTERFACE_H_
+#define NINJA_DISK_INTERFACE_H_
+
+#include <string>
+using namespace std;
+
+#include "timestamp.h"
+
+/// Interface for accessing the disk.
+///
+/// Abstract so it can be mocked out for tests. The real implementation
+/// is RealDiskInterface.
+struct DiskInterface {
+ virtual ~DiskInterface() {}
+
+  /// stat() a file, returning the mtime, 0 if the file is missing, or -1
+  /// on other errors.
+ virtual TimeStamp Stat(const string& path) = 0;
+
+ /// Create a directory, returning false on failure.
+ virtual bool MakeDir(const string& path) = 0;
+
+  /// Create a file with the specified name and contents.
+  /// Returns true on success, false on failure.
+ virtual bool WriteFile(const string& path, const string& contents) = 0;
+
+ /// Read a file to a string. Fill in |err| on error.
+ virtual string ReadFile(const string& path, string* err) = 0;
+
+  /// Remove the file named @a path. It behaves like 'rm -f path', so no errors
+  /// are reported if it does not exist.
+ /// @returns 0 if the file has been removed,
+ /// 1 if the file does not exist, and
+ /// -1 if an error occurs.
+ virtual int RemoveFile(const string& path) = 0;
+
+ /// Create all the parent directories for path; like mkdir -p
+ /// `basename path`.
+ bool MakeDirs(const string& path);
+};
+
+/// Implementation of DiskInterface that actually hits the disk.
+struct RealDiskInterface : public DiskInterface {
+ RealDiskInterface() : quiet_(false) {}
+ virtual ~RealDiskInterface() {}
+ virtual TimeStamp Stat(const string& path);
+ virtual bool MakeDir(const string& path);
+ virtual bool WriteFile(const string& path, const string& contents);
+ virtual string ReadFile(const string& path, string* err);
+ virtual int RemoveFile(const string& path);
+
+ /// Whether to print on errors. Used to make a test quieter.
+ bool quiet_;
+};
+
+#endif // NINJA_DISK_INTERFACE_H_
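
Because DiskInterface is abstract, tests can avoid touching the real file
system by substituting their own implementation, as the tests in this patch do
with VirtualFileSystem (clean_test.cc) and the StatTest fixture below. As a
rough sketch of the idea (type and member names invented for illustration),
an in-memory stub might look like this:

    #include <map>
    #include <string>

    #include "disk_interface.h"

    // Sketch: in-memory DiskInterface for tests; mtimes are faked as 1.
    struct StubDiskInterface : public DiskInterface {
      virtual TimeStamp Stat(const string& path) {
        return files_.count(path) ? 1 : 0;  // 0 means "missing", per the contract.
      }
      virtual bool MakeDir(const string& path) { return true; }
      virtual bool WriteFile(const string& path, const string& contents) {
        files_[path] = contents;
        return true;
      }
      virtual string ReadFile(const string& path, string* err) {
        map<string, string>::const_iterator i = files_.find(path);
        return i == files_.end() ? "" : i->second;
      }
      virtual int RemoveFile(const string& path) {
        return files_.erase(path) ? 0 : 1;  // 1 means the file did not exist.
      }

      map<string, string> files_;
    };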
diff --git a/ninja/src/disk_interface_test.cc b/ninja/src/disk_interface_test.cc
new file mode 100644
index 00000000000..55822a68ea0
--- /dev/null
+++ b/ninja/src/disk_interface_test.cc
@@ -0,0 +1,207 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#ifdef _WIN32
+#include <io.h>
+#include <windows.h>
+#endif
+
+#include "disk_interface.h"
+#include "graph.h"
+#include "test.h"
+
+namespace {
+
+struct DiskInterfaceTest : public testing::Test {
+ virtual void SetUp() {
+ // These tests do real disk accesses, so create a temp dir.
+ temp_dir_.CreateAndEnter("Ninja-DiskInterfaceTest");
+ }
+
+ virtual void TearDown() {
+ temp_dir_.Cleanup();
+ }
+
+ bool Touch(const char* path) {
+ FILE *f = fopen(path, "w");
+ if (!f)
+ return false;
+ return fclose(f) == 0;
+ }
+
+ ScopedTempDir temp_dir_;
+ RealDiskInterface disk_;
+};
+
+TEST_F(DiskInterfaceTest, StatMissingFile) {
+ EXPECT_EQ(0, disk_.Stat("nosuchfile"));
+
+ // On Windows, the errno for a file in a nonexistent directory
+ // is different.
+ EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile"));
+
+ // On POSIX systems, the errno is different if a component of the
+ // path prefix is not a directory.
+ ASSERT_TRUE(Touch("notadir"));
+ EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile"));
+}
+
+TEST_F(DiskInterfaceTest, StatBadPath) {
+ disk_.quiet_ = true;
+#ifdef _WIN32
+ string bad_path("cc:\\foo");
+ EXPECT_EQ(-1, disk_.Stat(bad_path));
+#else
+ string too_long_name(512, 'x');
+ EXPECT_EQ(-1, disk_.Stat(too_long_name));
+#endif
+ disk_.quiet_ = false;
+}
+
+TEST_F(DiskInterfaceTest, StatExistingFile) {
+ ASSERT_TRUE(Touch("file"));
+ EXPECT_GT(disk_.Stat("file"), 1);
+}
+
+TEST_F(DiskInterfaceTest, ReadFile) {
+ string err;
+ EXPECT_EQ("", disk_.ReadFile("foobar", &err));
+ EXPECT_EQ("", err);
+
+ const char* kTestFile = "testfile";
+ FILE* f = fopen(kTestFile, "wb");
+ ASSERT_TRUE(f);
+ const char* kTestContent = "test content\nok";
+ fprintf(f, "%s", kTestContent);
+ ASSERT_EQ(0, fclose(f));
+
+ EXPECT_EQ(kTestContent, disk_.ReadFile(kTestFile, &err));
+ EXPECT_EQ("", err);
+}
+
+TEST_F(DiskInterfaceTest, MakeDirs) {
+ EXPECT_TRUE(disk_.MakeDirs("path/with/double//slash/"));
+}
+
+TEST_F(DiskInterfaceTest, RemoveFile) {
+ const char* kFileName = "file-to-remove";
+ ASSERT_TRUE(Touch(kFileName));
+ EXPECT_EQ(0, disk_.RemoveFile(kFileName));
+ EXPECT_EQ(1, disk_.RemoveFile(kFileName));
+ EXPECT_EQ(1, disk_.RemoveFile("does not exist"));
+}
+
+struct StatTest : public StateTestWithBuiltinRules,
+ public DiskInterface {
+ StatTest() : scan_(&state_, NULL, NULL, this) {}
+
+ // DiskInterface implementation.
+ virtual TimeStamp Stat(const string& path);
+ virtual bool WriteFile(const string& path, const string& contents) {
+ assert(false);
+ return true;
+ }
+ virtual bool MakeDir(const string& path) {
+ assert(false);
+ return false;
+ }
+ virtual string ReadFile(const string& path, string* err) {
+ assert(false);
+ return "";
+ }
+ virtual int RemoveFile(const string& path) {
+ assert(false);
+ return 0;
+ }
+
+ DependencyScan scan_;
+ map<string, TimeStamp> mtimes_;
+ vector<string> stats_;
+};
+
+TimeStamp StatTest::Stat(const string& path) {
+ stats_.push_back(path);
+ map<string, TimeStamp>::iterator i = mtimes_.find(path);
+ if (i == mtimes_.end())
+ return 0; // File not found.
+ return i->second;
+}
+
+TEST_F(StatTest, Simple) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in\n"));
+
+ Node* out = GetNode("out");
+ out->Stat(this);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out->in_edge(), NULL);
+ ASSERT_EQ(2u, stats_.size());
+ ASSERT_EQ("out", stats_[0]);
+ ASSERT_EQ("in", stats_[1]);
+}
+
+TEST_F(StatTest, TwoStep) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"));
+
+ Node* out = GetNode("out");
+ out->Stat(this);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out->in_edge(), NULL);
+ ASSERT_EQ(3u, stats_.size());
+ ASSERT_EQ("out", stats_[0]);
+ ASSERT_TRUE(GetNode("out")->dirty());
+ ASSERT_EQ("mid", stats_[1]);
+ ASSERT_TRUE(GetNode("mid")->dirty());
+ ASSERT_EQ("in", stats_[2]);
+}
+
+TEST_F(StatTest, Tree) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid1 mid2\n"
+"build mid1: cat in11 in12\n"
+"build mid2: cat in21 in22\n"));
+
+ Node* out = GetNode("out");
+ out->Stat(this);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out->in_edge(), NULL);
+ ASSERT_EQ(1u + 6u, stats_.size());
+ ASSERT_EQ("mid1", stats_[1]);
+ ASSERT_TRUE(GetNode("mid1")->dirty());
+ ASSERT_EQ("in11", stats_[2]);
+}
+
+TEST_F(StatTest, Middle) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat mid\n"
+"build mid: cat in\n"));
+
+ mtimes_["in"] = 1;
+ mtimes_["mid"] = 0; // missing
+ mtimes_["out"] = 1;
+
+ Node* out = GetNode("out");
+ out->Stat(this);
+ ASSERT_EQ(1u, stats_.size());
+ scan_.RecomputeDirty(out->in_edge(), NULL);
+ ASSERT_FALSE(GetNode("in")->dirty());
+ ASSERT_TRUE(GetNode("mid")->dirty());
+ ASSERT_TRUE(GetNode("out")->dirty());
+}
+
+} // namespace
diff --git a/ninja/src/edit_distance.cc b/ninja/src/edit_distance.cc
new file mode 100644
index 00000000000..cc4483ffa5b
--- /dev/null
+++ b/ninja/src/edit_distance.cc
@@ -0,0 +1,66 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "edit_distance.h"
+
+#include <vector>
+
+int EditDistance(const StringPiece& s1,
+ const StringPiece& s2,
+ bool allow_replacements,
+ int max_edit_distance) {
+ // The algorithm implemented below is the "classic"
+ // dynamic-programming algorithm for computing the Levenshtein
+ // distance, which is described here:
+ //
+ // http://en.wikipedia.org/wiki/Levenshtein_distance
+ //
+ // Although the algorithm is typically described using an m x n
+ // array, only two rows are used at a time, so this implementation
+ // just keeps two separate vectors for those two rows.
+ int m = s1.len_;
+ int n = s2.len_;
+
+ vector<int> previous(n + 1);
+ vector<int> current(n + 1);
+
+ for (int i = 0; i <= n; ++i)
+ previous[i] = i;
+
+ for (int y = 1; y <= m; ++y) {
+ current[0] = y;
+ int best_this_row = current[0];
+
+ for (int x = 1; x <= n; ++x) {
+ if (allow_replacements) {
+ current[x] = min(previous[x-1] + (s1.str_[y-1] == s2.str_[x-1] ? 0 : 1),
+ min(current[x-1], previous[x])+1);
+ }
+ else {
+ if (s1.str_[y-1] == s2.str_[x-1])
+ current[x] = previous[x-1];
+ else
+ current[x] = min(current[x-1], previous[x]) + 1;
+ }
+ best_this_row = min(best_this_row, current[x]);
+ }
+
+ if (max_edit_distance && best_this_row > max_edit_distance)
+ return max_edit_distance + 1;
+
+ current.swap(previous);
+ }
+
+ return previous[n];
+}
diff --git a/ninja/src/edit_distance.h b/ninja/src/edit_distance.h
new file mode 100644
index 00000000000..45ae4aecd31
--- /dev/null
+++ b/ninja/src/edit_distance.h
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EDIT_DISTANCE_H_
+#define NINJA_EDIT_DISTANCE_H_
+
+#include "string_piece.h"
+
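+/// Compute the edit distance between two strings. When allow_replacements
+/// is false, only insertions and deletions count as edits. A non-zero
+/// max_edit_distance caps the search: once the distance is known to exceed
+/// it, max_edit_distance + 1 is returned. For example (see
+/// edit_distance_test.cc), EditDistance("ninja", "njnja") == 1.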
+int EditDistance(const StringPiece& s1,
+ const StringPiece& s2,
+ bool allow_replacements = true,
+ int max_edit_distance = 0);
+
+#endif // NINJA_EDIT_DISTANCE_H_
diff --git a/ninja/src/edit_distance_test.cc b/ninja/src/edit_distance_test.cc
new file mode 100644
index 00000000000..9dc0f827a75
--- /dev/null
+++ b/ninja/src/edit_distance_test.cc
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "edit_distance.h"
+
+#include "test.h"
+
+TEST(EditDistanceTest, TestEmpty) {
+ EXPECT_EQ(5, EditDistance("", "ninja"));
+ EXPECT_EQ(5, EditDistance("ninja", ""));
+ EXPECT_EQ(0, EditDistance("", ""));
+}
+
+TEST(EditDistanceTest, TestMaxDistance) {
+ const bool allow_replacements = true;
+ for (int max_distance = 1; max_distance < 7; ++max_distance) {
+ EXPECT_EQ(max_distance + 1,
+ EditDistance("abcdefghijklmnop", "ponmlkjihgfedcba",
+ allow_replacements, max_distance));
+ }
+}
+
+TEST(EditDistanceTest, TestAllowReplacements) {
+ bool allow_replacements = true;
+ EXPECT_EQ(1, EditDistance("ninja", "njnja", allow_replacements));
+ EXPECT_EQ(1, EditDistance("njnja", "ninja", allow_replacements));
+
+ allow_replacements = false;
+ EXPECT_EQ(2, EditDistance("ninja", "njnja", allow_replacements));
+ EXPECT_EQ(2, EditDistance("njnja", "ninja", allow_replacements));
+}
+
+TEST(EditDistanceTest, TestBasics) {
+ EXPECT_EQ(0, EditDistance("browser_tests", "browser_tests"));
+ EXPECT_EQ(1, EditDistance("browser_test", "browser_tests"));
+ EXPECT_EQ(1, EditDistance("browser_tests", "browser_test"));
+}
diff --git a/ninja/src/eval_env.cc b/ninja/src/eval_env.cc
new file mode 100644
index 00000000000..834b7e17c86
--- /dev/null
+++ b/ninja/src/eval_env.cc
@@ -0,0 +1,80 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "eval_env.h"
+
+string BindingEnv::LookupVariable(const string& var) {
+ map<string, string>::iterator i = bindings_.find(var);
+ if (i != bindings_.end())
+ return i->second;
+ if (parent_)
+ return parent_->LookupVariable(var);
+ return "";
+}
+
+void BindingEnv::AddBinding(const string& key, const string& val) {
+ bindings_[key] = val;
+}
+
+string BindingEnv::LookupWithFallback(const string& var,
+ const EvalString* eval,
+ Env* env) {
+ map<string, string>::iterator i = bindings_.find(var);
+ if (i != bindings_.end())
+ return i->second;
+
+ if (eval)
+ return eval->Evaluate(env);
+
+ if (parent_)
+ return parent_->LookupVariable(var);
+
+ return "";
+}
+
+string EvalString::Evaluate(Env* env) const {
+ string result;
+ for (TokenList::const_iterator i = parsed_.begin(); i != parsed_.end(); ++i) {
+ if (i->second == RAW)
+ result.append(i->first);
+ else
+ result.append(env->LookupVariable(i->first));
+ }
+ return result;
+}
+
+void EvalString::AddText(StringPiece text) {
+ // Add it to the end of an existing RAW token if possible.
+ if (!parsed_.empty() && parsed_.back().second == RAW) {
+ parsed_.back().first.append(text.str_, text.len_);
+ } else {
+ parsed_.push_back(make_pair(text.AsString(), RAW));
+ }
+}
+
+void EvalString::AddSpecial(StringPiece text) {
+ parsed_.push_back(make_pair(text.AsString(), SPECIAL));
+}
+
+string EvalString::Serialize() const {
+ string result;
+ for (TokenList::const_iterator i = parsed_.begin();
+ i != parsed_.end(); ++i) {
+ result.append("[");
+ if (i->second == SPECIAL)
+ result.append("$");
+ result.append(i->first);
+ result.append("]");
+ }
+ return result;
+}
diff --git a/ninja/src/eval_env.h b/ninja/src/eval_env.h
new file mode 100644
index 00000000000..f3c959acbae
--- /dev/null
+++ b/ninja/src/eval_env.h
@@ -0,0 +1,78 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EVAL_ENV_H_
+#define NINJA_EVAL_ENV_H_
+
+#include <map>
+#include <string>
+#include <vector>
+using namespace std;
+
+#include "string_piece.h"
+
+struct EvalString;
+
+/// An interface for a scope for variable (e.g. "$foo") lookups.
+struct Env {
+ virtual ~Env() {}
+ virtual string LookupVariable(const string& var) = 0;
+};
+
+/// An Env which contains a mapping of variables to values
+/// as well as a pointer to a parent scope.
+struct BindingEnv : public Env {
+ BindingEnv() : parent_(NULL) {}
+ explicit BindingEnv(Env* parent) : parent_(parent) {}
+
+ virtual ~BindingEnv() {}
+ virtual string LookupVariable(const string& var);
+
+ void AddBinding(const string& key, const string& val);
+
+ /// This is tricky. Edges want lookup scope to go in this order:
+ /// 1) value set on edge itself (edge_->env_)
+ /// 2) value set on rule, with expansion in the edge's scope
+ /// 3) value set on enclosing scope of edge (edge_->env_->parent_)
+ /// This function takes as parameters the necessary info to do (2).
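+ /// For example, a "depfile = y" bound on a build statement overrides a
+ /// rule-level "depfile = x" (see GraphTest.DepfileOverride in graph_test.cc).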
+ string LookupWithFallback(const string& var, const EvalString* eval,
+ Env* env);
+
+private:
+ map<string, string> bindings_;
+ Env* parent_;
+};
+
+/// A tokenized string that contains variable references.
+/// Can be evaluated relative to an Env.
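+/// For example, parsing "cc -o $out $in" would produce the tokens
+/// RAW("cc -o "), SPECIAL("out"), RAW(" "), SPECIAL("in"), which
+/// Serialize() renders as "[cc -o ][$out][ ][$in]".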
+struct EvalString {
+ string Evaluate(Env* env) const;
+
+ void Clear() { parsed_.clear(); }
+ bool empty() const { return parsed_.empty(); }
+
+ void AddText(StringPiece text);
+ void AddSpecial(StringPiece text);
+
+ /// Construct a human-readable representation of the parsed state,
+ /// for use in tests.
+ string Serialize() const;
+
+private:
+ enum TokenType { RAW, SPECIAL };
+ typedef vector<pair<string, TokenType> > TokenList;
+ TokenList parsed_;
+};
+
+#endif // NINJA_EVAL_ENV_H_
diff --git a/ninja/src/exit_status.h b/ninja/src/exit_status.h
new file mode 100644
index 00000000000..a714ece791f
--- /dev/null
+++ b/ninja/src/exit_status.h
@@ -0,0 +1,24 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EXIT_STATUS_H_
+#define NINJA_EXIT_STATUS_H_
+
+enum ExitStatus {
+ ExitSuccess,
+ ExitFailure,
+ ExitInterrupted
+};
+
+#endif // NINJA_EXIT_STATUS_H_
diff --git a/ninja/src/explain.cc b/ninja/src/explain.cc
new file mode 100644
index 00000000000..4e14c25a07a
--- /dev/null
+++ b/ninja/src/explain.cc
@@ -0,0 +1,15 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+bool g_explaining = false;
diff --git a/ninja/src/explain.h b/ninja/src/explain.h
new file mode 100644
index 00000000000..d4f6a6c2505
--- /dev/null
+++ b/ninja/src/explain.h
@@ -0,0 +1,27 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_EXPLAIN_H_
+#define NINJA_EXPLAIN_H_
+
+#include <stdio.h>
+
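+// Log a "ninja explain:" diagnostic to stderr when g_explaining is set;
+// used to report why an edge or output is considered dirty, e.g.
+//   EXPLAIN("output %s doesn't exist", output->path().c_str());
+// Note the format string must be followed by at least one argument, since
+// __VA_ARGS__ cannot be empty here.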
+#define EXPLAIN(fmt, ...) { \
+ if (g_explaining) \
+ fprintf(stderr, "ninja explain: " fmt "\n", __VA_ARGS__); \
+}
+
+extern bool g_explaining;
+
+#endif // NINJA_EXPLAIN_H_
diff --git a/ninja/src/gen_doxygen_mainpage.sh b/ninja/src/gen_doxygen_mainpage.sh
new file mode 100755
index 00000000000..d1599477ebd
--- /dev/null
+++ b/ninja/src/gen_doxygen_mainpage.sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+
+# Copyright 2011 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+
+STATUS=0
+
+# Print each of its arguments on stderr (one per line) prefixed by the
+# basename of this script.
+stderr()
+{
+ local me=$(basename "$0")
+ local i
+ for i
+ do
+ echo >&2 "$me: $i"
+ done
+}
+
+# Print each of its arguments on stderr (one per line) prefixed by the
+# basename of this script and 'error'.
+error()
+{
+ local i
+ for i
+ do
+ stderr "error: $i"
+ done
+ STATUS=1
+}
+
+generate_header()
+{
+ cat <<EOF
+/**
+ * \\mainpage
+EOF
+}
+
+generate_footer()
+{
+ cat <<EOF
+ */
+EOF
+}
+
+include_file()
+{
+ local file="$1"
+ if ! [ -r "$file" ]
+ then
+ error "'$file' is not readable."
+ return
+ fi
+ cat <<EOF
+ * \\section $file
+ * \\verbatim
+EOF
+ cat < "$file"
+ cat <<EOF
+ \\endverbatim
+EOF
+}
+
+if [ $# -eq 0 ]
+then
+ echo >&2 "usage: $0 inputs..."
+ exit 1
+fi
+
+generate_header
+for i in "$@"
+do
+ include_file "$i"
+done
+generate_footer
+
+exit $STATUS
diff --git a/ninja/src/getopt.c b/ninja/src/getopt.c
new file mode 100644
index 00000000000..75ef99cfb4e
--- /dev/null
+++ b/ninja/src/getopt.c
@@ -0,0 +1,408 @@
+/****************************************************************************
+
+getopt.c - Read command line options
+
+AUTHOR: Gregory Pietsch
+CREATED Fri Jan 10 21:13:05 1997
+
+DESCRIPTION:
+
+The getopt() function parses the command line arguments. Its arguments argc
+and argv are the argument count and array as passed to the main() function
+on program invocation. The argument optstring is a list of available option
+characters. If such a character is followed by a colon (`:'), the option
+takes an argument, which is placed in optarg. If such a character is
+followed by two colons, the option takes an optional argument, which is
+placed in optarg. If the option does not take an argument, optarg is NULL.
+
+The external variable optind is the index of the next array element of argv
+to be processed; it communicates from one call to the next which element to
+process.
+
+The getopt_long() function works like getopt() except that it also accepts
+long options started by two dashes `--'. If these take values, it is either
+in the form
+
+--arg=value
+
+ or
+
+--arg value
+
+It takes the additional argument longopts, which is a pointer to the first
+element of an array of type GETOPT_LONG_OPTION_T. The last element of the
+array must have NULL for its name field.
+
+The longind pointer points to the index of the current long option relative
+to longopts if it is non-NULL.
+
+The getopt() function returns the option character if the option was found
+successfully, `:' if there was a missing parameter for one of the options,
+`?' for an unknown option character, and EOF for the end of the option list.
+
+The getopt_long() function's return value is described in the header file.
+
+The function getopt_long_only() is identical to getopt_long(), except that a
+plus sign `+' can introduce long options as well as `--'.
+
+The following describes how to deal with options that follow non-option
+argv-elements.
+
+If the caller did not specify anything, the default is REQUIRE_ORDER if the
+environment variable POSIXLY_CORRECT is defined, PERMUTE otherwise.
+
+REQUIRE_ORDER means don't recognize them as options; stop option processing
+when the first non-option is seen. This is what Unix does. This mode of
+operation is selected by either setting the environment variable
+POSIXLY_CORRECT, or using `+' as the first character of the optstring
+parameter.
+
+PERMUTE is the default. We permute the contents of ARGV as we scan, so that
+eventually all the non-options are at the end. This allows options to be
+given in any order, even with programs that were not written to expect this.
+
+RETURN_IN_ORDER is an option available to programs that were written to
+expect options and other argv-elements in any order and that care about the
+ordering of the two. We describe each non-option argv-element as if it were
+the argument of an option with character code 1. Using `-' as the first
+character of the optstring parameter selects this mode of operation.
+
+The special argument `--' forces an end of option-scanning regardless of the
+value of ordering. In the case of RETURN_IN_ORDER, only `--' can cause
+getopt() and friends to return EOF with optind != argc.
+
+COPYRIGHT NOTICE AND DISCLAIMER:
+
+Copyright (C) 1997 Gregory Pietsch
+
+This file and the accompanying getopt.h header file are hereby placed in the
+public domain without restrictions. Just give the author credit, don't
+claim you wrote it or prevent anyone else from using it.
+
+Gregory Pietsch's current e-mail address:
+gpietsch@comcast.net
+****************************************************************************/
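+
+/* A minimal usage sketch; the names opts, verbose and outfile are
+   illustrative and do not appear elsewhere in this file:
+
+     static GETOPT_LONG_OPTION_T opts[] = {
+       {"verbose", no_argument,  NULL, 'v'},
+       {"output",  REQUIRED_ARG, NULL, 'o'},
+       {NULL, 0, NULL, 0}        (the terminating entry must have a NULL name)
+     };
+     int c;
+     int verbose = 0;
+     const char *outfile = NULL;
+
+     while ((c = getopt_long(argc, argv, "vo:", opts, NULL)) != EOF) {
+       switch (c) {
+       case 'v': verbose = 1; break;
+       case 'o': outfile = optarg; break;
+       default:  break;          (c is '?' for an unknown option or ':' for
+                                  a missing required argument)
+       }
+     }
+*/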
+
+/* include files */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef GETOPT_H
+#include "getopt.h"
+#endif
+
+/* macros */
+
+/* types */
+typedef enum GETOPT_ORDERING_T
+{
+ PERMUTE,
+ RETURN_IN_ORDER,
+ REQUIRE_ORDER
+} GETOPT_ORDERING_T;
+
+/* globally-defined variables */
+char *optarg = NULL;
+int optind = 0;
+int opterr = 1;
+int optopt = '?';
+
+/* functions */
+
+/* reverse_argv_elements: reverses num elements starting at argv */
+static void
+reverse_argv_elements (char **argv, int num)
+{
+ int i;
+ char *tmp;
+
+ for (i = 0; i < (num >> 1); i++)
+ {
+ tmp = argv[i];
+ argv[i] = argv[num - i - 1];
+ argv[num - i - 1] = tmp;
+ }
+}
+
+/* permute: swap two blocks of argv-elements given their lengths */
+static void
+permute (char **argv, int len1, int len2)
+{
+ reverse_argv_elements (argv, len1);
+ reverse_argv_elements (argv, len1 + len2);
+ reverse_argv_elements (argv, len2);
+}
+
+/* is_option: is this argv-element an option or the end of the option list? */
+static int
+is_option (char *argv_element, int only)
+{
+ return ((argv_element == NULL)
+ || (argv_element[0] == '-') || (only && argv_element[0] == '+'));
+}
+
+/* getopt_internal: the function that does all the dirty work */
+static int
+getopt_internal (int argc, char **argv, char *shortopts,
+ GETOPT_LONG_OPTION_T * longopts, int *longind, int only)
+{
+ GETOPT_ORDERING_T ordering = PERMUTE;
+ static size_t optwhere = 0;
+ size_t permute_from = 0;
+ int num_nonopts = 0;
+ int optindex = 0;
+ size_t match_chars = 0;
+ char *possible_arg = NULL;
+ int longopt_match = -1;
+ int has_arg = -1;
+ char *cp = NULL;
+ int arg_next = 0;
+
+ /* first, deal with silly parameters and easy stuff */
+ if (argc == 0 || argv == NULL || (shortopts == NULL && longopts == NULL))
+ return (optopt = '?');
+ if (optind >= argc || argv[optind] == NULL)
+ return EOF;
+ if (strcmp (argv[optind], "--") == 0)
+ {
+ optind++;
+ return EOF;
+ }
+ /* if this is our first time through */
+ if (optind == 0)
+ optind = optwhere = 1;
+
+ /* define ordering */
+ if (shortopts != NULL && (*shortopts == '-' || *shortopts == '+'))
+ {
+ ordering = (*shortopts == '-') ? RETURN_IN_ORDER : REQUIRE_ORDER;
+ shortopts++;
+ }
+ else
+ ordering = (getenv ("POSIXLY_CORRECT") != NULL) ? REQUIRE_ORDER : PERMUTE;
+
+ /*
+ * based on ordering, find our next option, if we're at the beginning of
+ * one
+ */
+ if (optwhere == 1)
+ {
+ switch (ordering)
+ {
+ case PERMUTE:
+ permute_from = optind;
+ num_nonopts = 0;
+ while (!is_option (argv[optind], only))
+ {
+ optind++;
+ num_nonopts++;
+ }
+ if (argv[optind] == NULL)
+ {
+ /* no more options */
+ optind = permute_from;
+ return EOF;
+ }
+ else if (strcmp (argv[optind], "--") == 0)
+ {
+ /* no more options, but have to get `--' out of the way */
+ permute (argv + permute_from, num_nonopts, 1);
+ optind = permute_from + 1;
+ return EOF;
+ }
+ break;
+ case RETURN_IN_ORDER:
+ if (!is_option (argv[optind], only))
+ {
+ optarg = argv[optind++];
+ return (optopt = 1);
+ }
+ break;
+ case REQUIRE_ORDER:
+ if (!is_option (argv[optind], only))
+ return EOF;
+ break;
+ }
+ }
+ /* we've got an option, so parse it */
+
+ /* first, is it a long option? */
+ if (longopts != NULL
+ && (memcmp (argv[optind], "--", 2) == 0
+ || (only && argv[optind][0] == '+')) && optwhere == 1)
+ {
+ /* handle long options */
+ if (memcmp (argv[optind], "--", 2) == 0)
+ optwhere = 2;
+ longopt_match = -1;
+ possible_arg = strchr (argv[optind] + optwhere, '=');
+ if (possible_arg == NULL)
+ {
+ /* no =, so next argv might be arg */
+ match_chars = strlen (argv[optind]);
+ possible_arg = argv[optind] + match_chars;
+ match_chars = match_chars - optwhere;
+ }
+ else
+ match_chars = (possible_arg - argv[optind]) - optwhere;
+ for (optindex = 0; longopts[optindex].name != NULL; optindex++)
+ {
+ if (memcmp (argv[optind] + optwhere,
+ longopts[optindex].name, match_chars) == 0)
+ {
+ /* do we have an exact match? */
+ if (match_chars == strlen (longopts[optindex].name))
+ {
+ longopt_match = optindex;
+ break;
+ }
+ /* do any characters match? */
+ else
+ {
+ if (longopt_match < 0)
+ longopt_match = optindex;
+ else
+ {
+ /* we have ambiguous options */
+ if (opterr)
+ fprintf (stderr, "%s: option `%s' is ambiguous "
+ "(could be `--%s' or `--%s')\n",
+ argv[0],
+ argv[optind],
+ longopts[longopt_match].name,
+ longopts[optindex].name);
+ return (optopt = '?');
+ }
+ }
+ }
+ }
+ if (longopt_match >= 0)
+ has_arg = longopts[longopt_match].has_arg;
+ }
+ /* if we didn't find a long option, is it a short option? */
+ if (longopt_match < 0 && shortopts != NULL)
+ {
+ cp = strchr (shortopts, argv[optind][optwhere]);
+ if (cp == NULL)
+ {
+ /* couldn't find option in shortopts */
+ if (opterr)
+ fprintf (stderr,
+ "%s: invalid option -- `-%c'\n",
+ argv[0], argv[optind][optwhere]);
+ optwhere++;
+ if (argv[optind][optwhere] == '\0')
+ {
+ optind++;
+ optwhere = 1;
+ }
+ return (optopt = '?');
+ }
+ has_arg = ((cp[1] == ':')
+ ? ((cp[2] == ':') ? OPTIONAL_ARG : REQUIRED_ARG) : no_argument);
+ possible_arg = argv[optind] + optwhere + 1;
+ optopt = *cp;
+ }
+ /* get argument and reset optwhere */
+ arg_next = 0;
+ switch (has_arg)
+ {
+ case OPTIONAL_ARG:
+ if (*possible_arg == '=')
+ possible_arg++;
+ if (*possible_arg != '\0')
+ {
+ optarg = possible_arg;
+ optwhere = 1;
+ }
+ else
+ optarg = NULL;
+ break;
+ case REQUIRED_ARG:
+ if (*possible_arg == '=')
+ possible_arg++;
+ if (*possible_arg != '\0')
+ {
+ optarg = possible_arg;
+ optwhere = 1;
+ }
+ else if (optind + 1 >= argc)
+ {
+ if (opterr)
+ {
+ fprintf (stderr, "%s: argument required for option `", argv[0]);
+ if (longopt_match >= 0)
+ fprintf (stderr, "--%s'\n", longopts[longopt_match].name);
+ else
+ fprintf (stderr, "-%c'\n", *cp);
+ }
+ optind++;
+ return (optopt = ':');
+ }
+ else
+ {
+ optarg = argv[optind + 1];
+ arg_next = 1;
+ optwhere = 1;
+ }
+ break;
+ case no_argument:
+ if (longopt_match < 0)
+ {
+ optwhere++;
+ if (argv[optind][optwhere] == '\0')
+ optwhere = 1;
+ }
+ else
+ optwhere = 1;
+ optarg = NULL;
+ break;
+ }
+
+ /* do we have to permute or otherwise modify optind? */
+ if (ordering == PERMUTE && optwhere == 1 && num_nonopts != 0)
+ {
+ permute (argv + permute_from, num_nonopts, 1 + arg_next);
+ optind = permute_from + 1 + arg_next;
+ }
+ else if (optwhere == 1)
+ optind = optind + 1 + arg_next;
+
+ /* finally return */
+ if (longopt_match >= 0)
+ {
+ if (longind != NULL)
+ *longind = longopt_match;
+ if (longopts[longopt_match].flag != NULL)
+ {
+ *(longopts[longopt_match].flag) = longopts[longopt_match].val;
+ return 0;
+ }
+ else
+ return longopts[longopt_match].val;
+ }
+ else
+ return optopt;
+}
+
+int
+getopt (int argc, char **argv, char *optstring)
+{
+ return getopt_internal (argc, argv, optstring, NULL, NULL, 0);
+}
+
+int
+getopt_long (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind)
+{
+ return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 0);
+}
+
+int
+getopt_long_only (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind)
+{
+ return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 1);
+}
+
+/* end of file GETOPT.C */
diff --git a/ninja/src/getopt.h b/ninja/src/getopt.h
new file mode 100644
index 00000000000..ead9878b9a8
--- /dev/null
+++ b/ninja/src/getopt.h
@@ -0,0 +1,55 @@
+#ifndef GETOPT_H
+#define GETOPT_H
+
+/* include files needed by this include file */
+
+/* macros defined by this include file */
+#define no_argument 0
+#define REQUIRED_ARG 1
+#define OPTIONAL_ARG 2
+
+/* types defined by this include file */
+
+/* GETOPT_LONG_OPTION_T: The type of long option */
+typedef struct GETOPT_LONG_OPTION_T
+{
+ const char *name; /* the name of the long option */
+ int has_arg; /* one of the above macros */
+ int *flag; /* determines if getopt_long() returns a
+ * value for a long option; if it is
+ * non-NULL, 0 is returned as a function
+ * value and the value of val is stored in
+ * the area pointed to by flag. Otherwise,
+ * val is returned. */
+ int val; /* determines the value to return if flag is
+ * NULL. */
+} GETOPT_LONG_OPTION_T;
+
+typedef GETOPT_LONG_OPTION_T option;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+ /* externally-defined variables */
+ extern char *optarg;
+ extern int optind;
+ extern int opterr;
+ extern int optopt;
+
+ /* function prototypes */
+ int getopt (int argc, char **argv, char *optstring);
+ int getopt_long (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind);
+ int getopt_long_only (int argc, char **argv, const char *shortopts,
+ const GETOPT_LONG_OPTION_T * longopts, int *longind);
+
+#ifdef __cplusplus
+};
+
+#endif
+
+#endif /* GETOPT_H */
+
+/* END OF FILE getopt.h */
diff --git a/ninja/src/graph.cc b/ninja/src/graph.cc
new file mode 100644
index 00000000000..fdd93de9f4c
--- /dev/null
+++ b/ninja/src/graph.cc
@@ -0,0 +1,451 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graph.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "build_log.h"
+#include "depfile_parser.h"
+#include "deps_log.h"
+#include "disk_interface.h"
+#include "explain.h"
+#include "manifest_parser.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+
+bool Node::Stat(DiskInterface* disk_interface) {
+ METRIC_RECORD("node stat");
+ mtime_ = disk_interface->Stat(path_);
+ return mtime_ > 0;
+}
+
+void Rule::AddBinding(const string& key, const EvalString& val) {
+ bindings_[key] = val;
+}
+
+const EvalString* Rule::GetBinding(const string& key) const {
+ map<string, EvalString>::const_iterator i = bindings_.find(key);
+ if (i == bindings_.end())
+ return NULL;
+ return &i->second;
+}
+
+// static
+bool Rule::IsReservedBinding(const string& var) {
+ return var == "command" ||
+ var == "depfile" ||
+ var == "description" ||
+ var == "deps" ||
+ var == "generator" ||
+ var == "pool" ||
+ var == "restat" ||
+ var == "rspfile" ||
+ var == "rspfile_content";
+}
+
+bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
+ bool dirty = false;
+ edge->outputs_ready_ = true;
+
+ TimeStamp deps_mtime = 0;
+ if (!dep_loader_.LoadDeps(edge, &deps_mtime, err)) {
+ if (!err->empty())
+ return false;
+ // Failed to load dependency info: rebuild to regenerate it.
+ dirty = true;
+ }
+
+ // Visit all inputs; we're dirty if any of the inputs are dirty.
+ Node* most_recent_input = NULL;
+ for (vector<Node*>::iterator i = edge->inputs_.begin();
+ i != edge->inputs_.end(); ++i) {
+ if ((*i)->StatIfNecessary(disk_interface_)) {
+ if (Edge* in_edge = (*i)->in_edge()) {
+ if (!RecomputeDirty(in_edge, err))
+ return false;
+ } else {
+ // This input has no in-edge; it is dirty if it is missing.
+ if (!(*i)->exists())
+ EXPLAIN("%s has no in-edge and is missing", (*i)->path().c_str());
+ (*i)->set_dirty(!(*i)->exists());
+ }
+ }
+
+ // If an input is not ready, neither are our outputs.
+ if (Edge* in_edge = (*i)->in_edge()) {
+ if (!in_edge->outputs_ready_)
+ edge->outputs_ready_ = false;
+ }
+
+ if (!edge->is_order_only(i - edge->inputs_.begin())) {
+ // If a regular input is dirty (or missing), we're dirty.
+ // Otherwise consider mtime.
+ if ((*i)->dirty()) {
+ EXPLAIN("%s is dirty", (*i)->path().c_str());
+ dirty = true;
+ } else {
+ if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime()) {
+ most_recent_input = *i;
+ }
+ }
+ }
+ }
+
+ // We may also be dirty due to output state: missing outputs, out of
+ // date outputs, etc. Visit all outputs and determine whether they're dirty.
+ if (!dirty) {
+ string command = edge->EvaluateCommand(true);
+
+ for (vector<Node*>::iterator i = edge->outputs_.begin();
+ i != edge->outputs_.end(); ++i) {
+ (*i)->StatIfNecessary(disk_interface_);
+ if (RecomputeOutputDirty(edge, most_recent_input, deps_mtime,
+ command, *i)) {
+ dirty = true;
+ break;
+ }
+ }
+ }
+
+ // Finally, visit each output to mark it as visited, and update its
+ // dirty state if necessary.
+ for (vector<Node*>::iterator i = edge->outputs_.begin();
+ i != edge->outputs_.end(); ++i) {
+ (*i)->StatIfNecessary(disk_interface_);
+ if (dirty)
+ (*i)->MarkDirty();
+ }
+
+ // If an edge is dirty, its outputs are normally not ready. (It's
+ // possible to be clean but still not be ready in the presence of
+ // order-only inputs.)
+ // But phony edges with no inputs have nothing to do, so are always
+ // ready.
+ if (dirty && !(edge->is_phony() && edge->inputs_.empty()))
+ edge->outputs_ready_ = false;
+
+ return true;
+}
+
+bool DependencyScan::RecomputeOutputDirty(Edge* edge,
+ Node* most_recent_input,
+ TimeStamp deps_mtime,
+ const string& command,
+ Node* output) {
+ if (edge->is_phony()) {
+ // Phony edges don't write any output. Outputs are only dirty if
+ // there are no inputs and we're missing the output.
+ return edge->inputs_.empty() && !output->exists();
+ }
+
+ BuildLog::LogEntry* entry = 0;
+
+ // Dirty if we're missing the output.
+ if (!output->exists()) {
+ EXPLAIN("output %s doesn't exist", output->path().c_str());
+ return true;
+ }
+
+ // Dirty if the output is older than the input.
+ if (most_recent_input && output->mtime() < most_recent_input->mtime()) {
+ TimeStamp output_mtime = output->mtime();
+
+ // If this is a restat rule, we may have cleaned the output with a restat
+ // rule in a previous run and stored the most recent input mtime in the
+ // build log. Use that mtime instead, so that the file will only be
+ // considered dirty if an input was modified since the previous run.
+ bool used_restat = false;
+ if (edge->GetBindingBool("restat") && build_log() &&
+ (entry = build_log()->LookupByOutput(output->path()))) {
+ output_mtime = entry->restat_mtime;
+ used_restat = true;
+ }
+
+ if (output_mtime < most_recent_input->mtime()) {
+ EXPLAIN("%soutput %s older than most recent input %s "
+ "(%d vs %d)",
+ used_restat ? "restat of " : "", output->path().c_str(),
+ most_recent_input->path().c_str(),
+ output_mtime, most_recent_input->mtime());
+ return true;
+ }
+ }
+
+ // Dirty if the output is newer than the deps.
+ if (deps_mtime && output->mtime() > deps_mtime) {
+ EXPLAIN("stored deps info out of date for for %s (%d vs %d)",
+ output->path().c_str(), deps_mtime, output->mtime());
+ return true;
+ }
+
+ // May also be dirty due to the command changing since the last build.
+ // But if this is a generator rule, the command changing does not make us
+ // dirty.
+ if (!edge->GetBindingBool("generator") && build_log()) {
+ if (entry || (entry = build_log()->LookupByOutput(output->path()))) {
+ if (BuildLog::LogEntry::HashCommand(command) != entry->command_hash) {
+ EXPLAIN("command line changed for %s", output->path().c_str());
+ return true;
+ }
+ }
+ if (!entry) {
+ EXPLAIN("command line not found in log for %s", output->path().c_str());
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool Edge::AllInputsReady() const {
+ for (vector<Node*>::const_iterator i = inputs_.begin();
+ i != inputs_.end(); ++i) {
+ if ((*i)->in_edge() && !(*i)->in_edge()->outputs_ready())
+ return false;
+ }
+ return true;
+}
+
+/// An Env for an Edge, providing $in and $out.
+struct EdgeEnv : public Env {
+ explicit EdgeEnv(Edge* edge) : edge_(edge) {}
+ virtual string LookupVariable(const string& var);
+
+ /// Given a span of Nodes, construct a list of paths suitable for a command
+ /// line.
+ string MakePathList(vector<Node*>::iterator begin,
+ vector<Node*>::iterator end,
+ char sep);
+
+ Edge* edge_;
+};
+
+string EdgeEnv::LookupVariable(const string& var) {
+ if (var == "in" || var == "in_newline") {
+ int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ -
+ edge_->order_only_deps_;
+ return MakePathList(edge_->inputs_.begin(),
+ edge_->inputs_.begin() + explicit_deps_count,
+ var == "in" ? ' ' : '\n');
+ } else if (var == "out") {
+ return MakePathList(edge_->outputs_.begin(),
+ edge_->outputs_.end(),
+ ' ');
+ }
+
+ // See notes on BindingEnv::LookupWithFallback.
+ const EvalString* eval = edge_->rule_->GetBinding(var);
+ return edge_->env_->LookupWithFallback(var, eval, this);
+}
+
+string EdgeEnv::MakePathList(vector<Node*>::iterator begin,
+ vector<Node*>::iterator end,
+ char sep) {
+ string result;
+ for (vector<Node*>::iterator i = begin; i != end; ++i) {
+ if (!result.empty())
+ result.push_back(sep);
+ const string& path = (*i)->path();
+ if (path.find(" ") != string::npos) {
+ result.append("\"");
+ result.append(path);
+ result.append("\"");
+ } else {
+ result.append(path);
+ }
+ }
+ return result;
+}
+
+string Edge::EvaluateCommand(bool incl_rsp_file) {
+ string command = GetBinding("command");
+ if (incl_rsp_file) {
+ string rspfile_content = GetBinding("rspfile_content");
+ if (!rspfile_content.empty())
+ command += ";rspfile=" + rspfile_content;
+ }
+ return command;
+}
+
+string Edge::GetBinding(const string& key) {
+ EdgeEnv env(this);
+ return env.LookupVariable(key);
+}
+
+bool Edge::GetBindingBool(const string& key) {
+ return !GetBinding(key).empty();
+}
+
+void Edge::Dump(const char* prefix) const {
+ printf("%s[ ", prefix);
+ for (vector<Node*>::const_iterator i = inputs_.begin();
+ i != inputs_.end() && *i != NULL; ++i) {
+ printf("%s ", (*i)->path().c_str());
+ }
+ printf("--%s-> ", rule_->name().c_str());
+ for (vector<Node*>::const_iterator i = outputs_.begin();
+ i != outputs_.end() && *i != NULL; ++i) {
+ printf("%s ", (*i)->path().c_str());
+ }
+ if (pool_) {
+ if (!pool_->name().empty()) {
+ printf("(in pool '%s')", pool_->name().c_str());
+ }
+ } else {
+ printf("(null pool?)");
+ }
+ printf("] 0x%p\n", this);
+}
+
+bool Edge::is_phony() const {
+ return rule_ == &State::kPhonyRule;
+}
+
+void Node::Dump(const char* prefix) const {
+ printf("%s <%s 0x%p> mtime: %d%s, (:%s), ",
+ prefix, path().c_str(), this,
+ mtime(), mtime() ? "" : " (:missing)",
+ dirty() ? " dirty" : " clean");
+ if (in_edge()) {
+ in_edge()->Dump("in-edge: ");
+ } else {
+ printf("no in-edge\n");
+ }
+ printf(" out edges:\n");
+ for (vector<Edge*>::const_iterator e = out_edges().begin();
+ e != out_edges().end() && *e != NULL; ++e) {
+ (*e)->Dump(" +- ");
+ }
+}
+
+bool ImplicitDepLoader::LoadDeps(Edge* edge, TimeStamp* mtime, string* err) {
+ string deps_type = edge->GetBinding("deps");
+ if (!deps_type.empty()) {
+ if (!LoadDepsFromLog(edge, mtime, err)) {
+ if (!err->empty())
+ return false;
+ EXPLAIN("deps for %s are missing", edge->outputs_[0]->path().c_str());
+ return false;
+ }
+ return true;
+ }
+
+ string depfile = edge->GetBinding("depfile");
+ if (!depfile.empty()) {
+ if (!LoadDepFile(edge, depfile, err)) {
+ if (!err->empty())
+ return false;
+ EXPLAIN("depfile '%s' is missing", depfile.c_str());
+ return false;
+ }
+ return true;
+ }
+
+ // No deps to load.
+ return true;
+}
+
+bool ImplicitDepLoader::LoadDepFile(Edge* edge, const string& path,
+ string* err) {
+ METRIC_RECORD("depfile load");
+ string content = disk_interface_->ReadFile(path, err);
+ if (!err->empty()) {
+ *err = "loading '" + path + "': " + *err;
+ return false;
+ }
+ // On a missing depfile: return false and empty *err.
+ if (content.empty())
+ return false;
+
+ DepfileParser depfile;
+ string depfile_err;
+ if (!depfile.Parse(&content, &depfile_err)) {
+ *err = path + ": " + depfile_err;
+ return false;
+ }
+
+ // Check that this depfile matches the edge's output.
+ Node* first_output = edge->outputs_[0];
+ StringPiece opath = StringPiece(first_output->path());
+ if (opath != depfile.out_) {
+ *err = "expected depfile '" + path + "' to mention '" +
+ first_output->path() + "', got '" + depfile.out_.AsString() + "'";
+ return false;
+ }
+
+ // Preallocate space in edge->inputs_ to be filled in below.
+ vector<Node*>::iterator implicit_dep =
+ PreallocateSpace(edge, depfile.ins_.size());
+
+ // Add the depfile's inputs as implicit deps of the edge.
+ for (vector<StringPiece>::iterator i = depfile.ins_.begin();
+ i != depfile.ins_.end(); ++i, ++implicit_dep) {
+ if (!CanonicalizePath(const_cast<char*>(i->str_), &i->len_, err))
+ return false;
+
+ Node* node = state_->GetNode(*i);
+ *implicit_dep = node;
+ node->AddOutEdge(edge);
+ CreatePhonyInEdge(node);
+ }
+
+ return true;
+}
+
+bool ImplicitDepLoader::LoadDepsFromLog(Edge* edge, TimeStamp* deps_mtime,
+ string* err) {
+ DepsLog::Deps* deps = deps_log_->GetDeps(edge->outputs_[0]);
+ if (!deps)
+ return false;
+
+ *deps_mtime = deps->mtime;
+
+ vector<Node*>::iterator implicit_dep =
+ PreallocateSpace(edge, deps->node_count);
+ for (int i = 0; i < deps->node_count; ++i, ++implicit_dep) {
+ Node* node = deps->nodes[i];
+ *implicit_dep = node;
+ node->AddOutEdge(edge);
+ CreatePhonyInEdge(node);
+ }
+ return true;
+}
+
+vector<Node*>::iterator ImplicitDepLoader::PreallocateSpace(Edge* edge,
+ int count) {
+ edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_,
+ (size_t)count, 0);
+ edge->implicit_deps_ += count;
+ return edge->inputs_.end() - edge->order_only_deps_ - count;
+}
+
+void ImplicitDepLoader::CreatePhonyInEdge(Node* node) {
+ if (node->in_edge())
+ return;
+
+ Edge* phony_edge = state_->AddEdge(&State::kPhonyRule);
+ node->set_in_edge(phony_edge);
+ phony_edge->outputs_.push_back(node);
+
+ // RecomputeDirty might not be called for phony_edge if a previous call
+ // to RecomputeDirty had caused the file to be stat'ed. Because previous
+ // invocations of RecomputeDirty would have seen this node without an
+ // input edge (and therefore ready), we have to set outputs_ready_ to true
+ // to avoid a potential stuck build. If we do call RecomputeDirty for
+ // this node, it will simply set outputs_ready_ to the correct value.
+ phony_edge->outputs_ready_ = true;
+}
diff --git a/ninja/src/graph.h b/ninja/src/graph.h
new file mode 100644
index 00000000000..428ba01e65d
--- /dev/null
+++ b/ninja/src/graph.h
@@ -0,0 +1,266 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_GRAPH_H_
+#define NINJA_GRAPH_H_
+
+#include <string>
+#include <vector>
+using namespace std;
+
+#include "eval_env.h"
+#include "timestamp.h"
+
+struct BuildLog;
+struct DiskInterface;
+struct DepsLog;
+struct Edge;
+struct Node;
+struct Pool;
+struct State;
+
+/// Information about a node in the dependency graph: the file, whether
+/// it's dirty, mtime, etc.
+struct Node {
+ explicit Node(const string& path)
+ : path_(path),
+ mtime_(-1),
+ dirty_(false),
+ in_edge_(NULL),
+ id_(-1) {}
+
+ /// Return true if the file exists (mtime_ got a value).
+ bool Stat(DiskInterface* disk_interface);
+
+ /// Return true if we needed to stat.
+ bool StatIfNecessary(DiskInterface* disk_interface) {
+ if (status_known())
+ return false;
+ Stat(disk_interface);
+ return true;
+ }
+
+ /// Mark as not-yet-stat()ed and not dirty.
+ void ResetState() {
+ mtime_ = -1;
+ dirty_ = false;
+ }
+
+ /// Mark the Node as already-stat()ed and missing.
+ void MarkMissing() {
+ mtime_ = 0;
+ }
+
+ bool exists() const {
+ return mtime_ != 0;
+ }
+
+ bool status_known() const {
+ return mtime_ != -1;
+ }
+
+ const string& path() const { return path_; }
+ TimeStamp mtime() const { return mtime_; }
+
+ bool dirty() const { return dirty_; }
+ void set_dirty(bool dirty) { dirty_ = dirty; }
+ void MarkDirty() { dirty_ = true; }
+
+ Edge* in_edge() const { return in_edge_; }
+ void set_in_edge(Edge* edge) { in_edge_ = edge; }
+
+ int id() const { return id_; }
+ void set_id(int id) { id_ = id; }
+
+ const vector<Edge*>& out_edges() const { return out_edges_; }
+ void AddOutEdge(Edge* edge) { out_edges_.push_back(edge); }
+
+ void Dump(const char* prefix="") const;
+
+private:
+ string path_;
+ /// Possible values of mtime_:
+ /// -1: file hasn't been examined
+ /// 0: we looked, and file doesn't exist
+ /// >0: actual file's mtime
+ TimeStamp mtime_;
+
+ /// Dirty is true when the underlying file is out-of-date.
+ /// But note that Edge::outputs_ready_ is also used in judging which
+ /// edges to build.
+ bool dirty_;
+
+ /// The Edge that produces this Node, or NULL when there is no
+ /// known edge to produce it.
+ Edge* in_edge_;
+
+ /// All Edges that use this Node as an input.
+ vector<Edge*> out_edges_;
+
+ /// A dense integer id for the node, assigned and used by DepsLog.
+ int id_;
+};
+
+/// An invokable build command and associated metadata (description, etc.).
+struct Rule {
+ explicit Rule(const string& name) : name_(name) {}
+
+ const string& name() const { return name_; }
+
+ typedef map<string, EvalString> Bindings;
+ void AddBinding(const string& key, const EvalString& val);
+
+ static bool IsReservedBinding(const string& var);
+
+ const EvalString* GetBinding(const string& key) const;
+
+ private:
+ // Allow the parsers to reach into this object and fill out its fields.
+ friend struct ManifestParser;
+
+ string name_;
+ map<string, EvalString> bindings_;
+};
+
+/// An edge in the dependency graph; links between Nodes using Rules.
+struct Edge {
+ Edge() : rule_(NULL), env_(NULL), outputs_ready_(false), implicit_deps_(0),
+ order_only_deps_(0) {}
+
+ /// Return true if all inputs' in-edges are ready.
+ bool AllInputsReady() const;
+
+ /// Expand all variables in a command and return it as a string.
+ /// If incl_rsp_file is enabled, the string will also contain the
+ /// full contents of a response file (if applicable)
+ string EvaluateCommand(bool incl_rsp_file = false);
+
+ string GetBinding(const string& key);
+ bool GetBindingBool(const string& key);
+
+ void Dump(const char* prefix="") const;
+
+ const Rule* rule_;
+ Pool* pool_;
+ vector<Node*> inputs_;
+ vector<Node*> outputs_;
+ BindingEnv* env_;
+ bool outputs_ready_;
+
+ const Rule& rule() const { return *rule_; }
+ Pool* pool() const { return pool_; }
+ int weight() const { return 1; }
+ bool outputs_ready() const { return outputs_ready_; }
+
+ // There are three types of inputs.
+ // 1) explicit deps, which show up as $in on the command line;
+ // 2) implicit deps, which the target depends on implicitly (e.g. C headers),
+ // and changes in them cause the target to rebuild;
+ // 3) order-only deps, which are needed before the target builds but which
+ // don't cause the target to rebuild.
+ // These are stored in inputs_ in that order, and we keep counts of
+ // #2 and #3 when we need to access the various subsets.
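+ // For example, with 4 explicit, 2 implicit and 1 order-only input,
+ // inputs_.size() == 7, implicit_deps_ == 2 and order_only_deps_ == 1;
+ // is_implicit() is true for indices 4 and 5, is_order_only() for index 6.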
+ int implicit_deps_;
+ int order_only_deps_;
+ bool is_implicit(size_t index) {
+ return index >= inputs_.size() - order_only_deps_ - implicit_deps_ &&
+ !is_order_only(index);
+ }
+ bool is_order_only(size_t index) {
+ return index >= inputs_.size() - order_only_deps_;
+ }
+
+ bool is_phony() const;
+};
+
+
+/// ImplicitDepLoader loads implicit dependencies, as referenced via the
+/// "depfile" attribute in build files.
+struct ImplicitDepLoader {
+ ImplicitDepLoader(State* state, DepsLog* deps_log,
+ DiskInterface* disk_interface)
+ : state_(state), disk_interface_(disk_interface), deps_log_(deps_log) {}
+
+ /// Load implicit dependencies for \a edge. May fill in \a mtime with
+ /// the timestamp of the loaded information.
+ /// @return false on error (without filling \a err if info is just missing).
+ bool LoadDeps(Edge* edge, TimeStamp* mtime, string* err);
+
+ DepsLog* deps_log() const {
+ return deps_log_;
+ }
+
+ private:
+ /// Load implicit dependencies for \a edge from a depfile attribute.
+ /// @return false on error (without filling \a err if info is just missing).
+ bool LoadDepFile(Edge* edge, const string& path, string* err);
+
+ /// Load implicit dependencies for \a edge from the DepsLog.
+ /// @return false on error (without filling \a err if info is just missing).
+ bool LoadDepsFromLog(Edge* edge, TimeStamp* mtime, string* err);
+
+ /// Preallocate \a count spaces in the input array on \a edge, returning
+ /// an iterator pointing at the first new space.
+ vector<Node*>::iterator PreallocateSpace(Edge* edge, int count);
+
+ /// If we don't already have an edge that generates this input,
+ /// create one; this way a missing input doesn't abort the build
+ /// but is instead rebuilt in that circumstance.
+ void CreatePhonyInEdge(Node* node);
+
+ State* state_;
+ DiskInterface* disk_interface_;
+ DepsLog* deps_log_;
+};
+
+
+/// DependencyScan manages the process of scanning the files in a graph
+/// and updating the dirty/outputs_ready state of all the nodes and edges.
+struct DependencyScan {
+ DependencyScan(State* state, BuildLog* build_log, DepsLog* deps_log,
+ DiskInterface* disk_interface)
+ : build_log_(build_log),
+ disk_interface_(disk_interface),
+ dep_loader_(state, deps_log, disk_interface) {}
+
+ /// Examine inputs, outputs, and command lines to judge whether an edge
+ /// needs to be re-run, and update outputs_ready_ and each output's |dirty_|
+ /// state accordingly.
+ /// Returns false on failure.
+ bool RecomputeDirty(Edge* edge, string* err);
+
+ /// Recompute whether a given single output should be marked dirty.
+ /// Returns true if so.
+ bool RecomputeOutputDirty(Edge* edge, Node* most_recent_input,
+ TimeStamp deps_mtime,
+ const string& command, Node* output);
+
+ BuildLog* build_log() const {
+ return build_log_;
+ }
+ void set_build_log(BuildLog* log) {
+ build_log_ = log;
+ }
+
+ DepsLog* deps_log() const {
+ return dep_loader_.deps_log();
+ }
+
+ private:
+ BuildLog* build_log_;
+ DiskInterface* disk_interface_;
+ ImplicitDepLoader dep_loader_;
+};
+
+#endif // NINJA_GRAPH_H_
diff --git a/ninja/src/graph_test.cc b/ninja/src/graph_test.cc
new file mode 100644
index 00000000000..63d5757da92
--- /dev/null
+++ b/ninja/src/graph_test.cc
@@ -0,0 +1,228 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graph.h"
+
+#include "test.h"
+
+struct GraphTest : public StateTestWithBuiltinRules {
+ GraphTest() : scan_(&state_, NULL, NULL, &fs_) {}
+
+ VirtualFileSystem fs_;
+ DependencyScan scan_;
+};
+
+TEST_F(GraphTest, MissingImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in | implicit\n"));
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+
+ Edge* edge = GetNode("out")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ // A missing implicit dep *should* make the output dirty.
+ // (In fact, a build will fail.)
+ // This is a change from prior semantics of ninja.
+ EXPECT_TRUE(GetNode("out")->dirty());
+}
+
+TEST_F(GraphTest, ModifiedImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out: cat in | implicit\n"));
+ fs_.Create("in", "");
+ fs_.Create("out", "");
+ fs_.Tick();
+ fs_.Create("implicit", "");
+
+ Edge* edge = GetNode("out")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ // A modified implicit dep should make the output dirty.
+ EXPECT_TRUE(GetNode("out")->dirty());
+}
+
+TEST_F(GraphTest, FunkyMakefilePath) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build out.o: catdep foo.cc\n"));
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: ./foo/../implicit.h\n");
+ fs_.Create("out.o", "");
+ fs_.Tick();
+ fs_.Create("implicit.h", "");
+
+ Edge* edge = GetNode("out.o")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ // implicit.h has changed, though our depfile refers to it with a
+ // non-canonical path; we should still find it.
+ EXPECT_TRUE(GetNode("out.o")->dirty());
+}
+
+TEST_F(GraphTest, ExplicitImplicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build implicit.h: cat data\n"
+"build out.o: catdep foo.cc || implicit.h\n"));
+ fs_.Create("implicit.h", "");
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: implicit.h\n");
+ fs_.Create("out.o", "");
+ fs_.Tick();
+ fs_.Create("data", "");
+
+ Edge* edge = GetNode("out.o")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ // We have both an implicit and an explicit dep on implicit.h.
+ // The implicit dep should "win" (in the sense that it should cause
+ // the output to be dirty).
+ EXPECT_TRUE(GetNode("out.o")->dirty());
+}
+
+TEST_F(GraphTest, PathWithCurrentDirectory) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build ./out.o: catdep ./foo.cc\n"));
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: foo.cc\n");
+ fs_.Create("out.o", "");
+
+ Edge* edge = GetNode("out.o")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("out.o")->dirty());
+}
+
+TEST_F(GraphTest, RootNodes) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build out1: cat in1\n"
+"build mid1: cat in1\n"
+"build out2: cat mid1\n"
+"build out3 out4: cat mid1\n"));
+
+ string err;
+ vector<Node*> root_nodes = state_.RootNodes(&err);
+ EXPECT_EQ(4u, root_nodes.size());
+ for (size_t i = 0; i < root_nodes.size(); ++i) {
+ string name = root_nodes[i]->path();
+ EXPECT_EQ("out", name.substr(0, 3));
+ }
+}
+
+TEST_F(GraphTest, VarInOutQuoteSpaces) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a$ b: cat nospace with$ space nospace2\n"));
+
+ Edge* edge = GetNode("a b")->in_edge();
+ EXPECT_EQ("cat nospace \"with space\" nospace2 > \"a b\"",
+ edge->EvaluateCommand());
+}
+
+// Regression test for https://github.com/martine/ninja/issues/380
+TEST_F(GraphTest, DepfileWithCanonicalizablePath) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build ./out.o: catdep ./foo.cc\n"));
+ fs_.Create("foo.cc", "");
+ fs_.Create("out.o.d", "out.o: bar/../foo.cc\n");
+ fs_.Create("out.o", "");
+
+ Edge* edge = GetNode("out.o")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ EXPECT_FALSE(GetNode("out.o")->dirty());
+}
+
+// Regression test for https://github.com/martine/ninja/issues/404
+TEST_F(GraphTest, DepfileRemoved) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule catdep\n"
+" depfile = $out.d\n"
+" command = cat $in > $out\n"
+"build ./out.o: catdep ./foo.cc\n"));
+ fs_.Create("foo.h", "");
+ fs_.Create("foo.cc", "");
+ fs_.Tick();
+ fs_.Create("out.o.d", "out.o: foo.h\n");
+ fs_.Create("out.o", "");
+
+ Edge* edge = GetNode("out.o")->in_edge();
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+ EXPECT_FALSE(GetNode("out.o")->dirty());
+
+ state_.Reset();
+ fs_.RemoveFile("out.o.d");
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+ EXPECT_TRUE(GetNode("out.o")->dirty());
+}
+
+// Check that rule-level variables are in scope for eval.
+TEST_F(GraphTest, RuleVariablesInScope) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+" depfile = x\n"
+" command = depfile is $depfile\n"
+"build out: r in\n"));
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ("depfile is x", edge->EvaluateCommand());
+}
+
+// Check that build statements can override rule builtins like depfile.
+TEST_F(GraphTest, DepfileOverride) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+" depfile = x\n"
+" command = unused\n"
+"build out: r in\n"
+" depfile = y\n"));
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ("y", edge->GetBinding("depfile"));
+}
+
+// Check that overridden values show up in expansion of rule-level bindings.
+TEST_F(GraphTest, DepfileOverrideParent) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+" depfile = x\n"
+" command = depfile is $depfile\n"
+"build out: r in\n"
+" depfile = y\n"));
+ Edge* edge = GetNode("out")->in_edge();
+ EXPECT_EQ("depfile is y", edge->GetBinding("command"));
+}
diff --git a/ninja/src/graphviz.cc b/ninja/src/graphviz.cc
new file mode 100644
index 00000000000..8354a2277a6
--- /dev/null
+++ b/ninja/src/graphviz.cc
@@ -0,0 +1,77 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "graphviz.h"
+
+#include <stdio.h>
+
+#include "graph.h"
+
+void GraphViz::AddTarget(Node* node) {
+ if (visited_nodes_.find(node) != visited_nodes_.end())
+ return;
+
+ printf("\"%p\" [label=\"%s\"]\n", node, node->path().c_str());
+ visited_nodes_.insert(node);
+
+ Edge* edge = node->in_edge();
+
+ if (!edge) {
+ // Leaf node.
+ // Draw as a rect?
+ return;
+ }
+
+ if (visited_edges_.find(edge) != visited_edges_.end())
+ return;
+ visited_edges_.insert(edge);
+
+ if (edge->inputs_.size() == 1 && edge->outputs_.size() == 1) {
+ // Can draw simply.
+ // Note extra space before label text -- this is cosmetic and feels
+ // like a graphviz bug.
+ printf("\"%p\" -> \"%p\" [label=\" %s\"]\n",
+ edge->inputs_[0], edge->outputs_[0], edge->rule_->name().c_str());
+ } else {
+ printf("\"%p\" [label=\"%s\", shape=ellipse]\n",
+ edge, edge->rule_->name().c_str());
+ for (vector<Node*>::iterator out = edge->outputs_.begin();
+ out != edge->outputs_.end(); ++out) {
+ printf("\"%p\" -> \"%p\"\n", edge, *out);
+ }
+ for (vector<Node*>::iterator in = edge->inputs_.begin();
+ in != edge->inputs_.end(); ++in) {
+ const char* order_only = "";
+ if (edge->is_order_only(in - edge->inputs_.begin()))
+ order_only = " style=dotted";
+ printf("\"%p\" -> \"%p\" [arrowhead=none%s]\n", (*in), edge, order_only);
+ }
+ }
+
+ for (vector<Node*>::iterator in = edge->inputs_.begin();
+ in != edge->inputs_.end(); ++in) {
+ AddTarget(*in);
+ }
+}
+
+void GraphViz::Start() {
+ printf("digraph ninja {\n");
+ printf("rankdir=\"LR\"\n");
+ printf("node [fontsize=10, shape=box, height=0.25]\n");
+ printf("edge [fontsize=10]\n");
+}
+
+void GraphViz::Finish() {
+ printf("}\n");
+}
diff --git a/ninja/src/graphviz.h b/ninja/src/graphviz.h
new file mode 100644
index 00000000000..1e2a29d9d83
--- /dev/null
+++ b/ninja/src/graphviz.h
@@ -0,0 +1,34 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_GRAPHVIZ_H_
+#define NINJA_GRAPHVIZ_H_
+
+#include <set>
+using namespace std;
+
+struct Node;
+struct Edge;
+
+/// Runs the process of creating GraphViz .dot file output.
+struct GraphViz {
+ void Start();
+ void AddTarget(Node* node);
+ void Finish();
+
+ set<Node*> visited_nodes_;
+ set<Edge*> visited_edges_;
+};
+
+#endif // NINJA_GRAPHVIZ_H_
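
A minimal sketch of how the GraphViz helper above is meant to be driven; only Start(), AddTarget() and Finish() come from the header, while the surrounding function is an invented example.

    // Sketch only: assumes the caller already has a Node* for the target it
    // wants to visualize (e.g. looked up from the build State elsewhere).
    #include "graphviz.h"

    void DumpTargetGraph(Node* target) {
      GraphViz graph;
      graph.Start();            // prints "digraph ninja {" plus default node/edge styles
      graph.AddTarget(target);  // recursively emits nodes and edges reachable from target
      graph.Finish();           // prints the closing "}"
    }
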
diff --git a/ninja/src/hash_collision_bench.cc b/ninja/src/hash_collision_bench.cc
new file mode 100644
index 00000000000..d0eabde6ddf
--- /dev/null
+++ b/ninja/src/hash_collision_bench.cc
@@ -0,0 +1,62 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "build_log.h"
+
+#include <algorithm>
+using namespace std;
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+int random(int low, int high) {
+ return int(low + (rand() / double(RAND_MAX)) * (high - low) + 0.5);
+}
+
+void RandomCommand(char** s) {
+ int len = random(5, 100);
+ *s = new char[len];
+ for (int i = 0; i < len; ++i)
+ (*s)[i] = (char)random(32, 127);
+}
+
+int main() {
+ const int N = 20 * 1000 * 1000;
+
+ // Leak these, else 10% of the runtime is spent destroying strings.
+ char** commands = new char*[N];
+ pair<uint64_t, int>* hashes = new pair<uint64_t, int>[N];
+
+ srand((int)time(NULL));
+
+ for (int i = 0; i < N; ++i) {
+ RandomCommand(&commands[i]);
+ hashes[i] = make_pair(BuildLog::LogEntry::HashCommand(commands[i]), i);
+ }
+
+ sort(hashes, hashes + N);
+
+ int num_collisions = 0;
+ for (int i = 1; i < N; ++i) {
+ if (hashes[i - 1].first == hashes[i].first) {
+ if (strcmp(commands[hashes[i - 1].second],
+ commands[hashes[i].second]) != 0) {
+ printf("collision!\n string 1: '%s'\n string 2: '%s'\n",
+ commands[hashes[i - 1].second],
+ commands[hashes[i].second]);
+ num_collisions++;
+ }
+ }
+ }
+ printf("\n\n%d collisions after %d runs\n", num_collisions, N);
+}
diff --git a/ninja/src/hash_map.h b/ninja/src/hash_map.h
new file mode 100644
index 00000000000..076f6c02198
--- /dev/null
+++ b/ninja/src/hash_map.h
@@ -0,0 +1,109 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_MAP_H_
+#define NINJA_MAP_H_
+
+#include "string_piece.h"
+
+// MurmurHash2, by Austin Appleby
+static inline
+unsigned int MurmurHash2(const void* key, size_t len) {
+ static const unsigned int seed = 0xDECAFBAD;
+ const unsigned int m = 0x5bd1e995;
+ const int r = 24;
+ unsigned int h = seed ^ len;
+ const unsigned char * data = (const unsigned char *)key;
+ while (len >= 4) {
+ unsigned int k = *(unsigned int *)data;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ data += 4;
+ len -= 4;
+ }
+ switch (len) {
+ case 3: h ^= data[2] << 16;
+ case 2: h ^= data[1] << 8;
+ case 1: h ^= data[0];
+ h *= m;
+ };
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+}
+
+#ifdef _MSC_VER
+#include <hash_map>
+
+using stdext::hash_map;
+using stdext::hash_compare;
+
+struct StringPieceCmp : public hash_compare<StringPiece> {
+ size_t operator()(const StringPiece& key) const {
+ return MurmurHash2(key.str_, key.len_);
+ }
+ bool operator()(const StringPiece& a, const StringPiece& b) const {
+ int cmp = strncmp(a.str_, b.str_, min(a.len_, b.len_));
+ if (cmp < 0) {
+ return true;
+ } else if (cmp > 0) {
+ return false;
+ } else {
+ return a.len_ < b.len_;
+ }
+ }
+};
+
+#else
+
+#include <ext/hash_map>
+
+using __gnu_cxx::hash_map;
+
+namespace __gnu_cxx {
+template<>
+struct hash<std::string> {
+ size_t operator()(const std::string& s) const {
+ return hash<const char*>()(s.c_str());
+ }
+};
+
+template<>
+struct hash<StringPiece> {
+ size_t operator()(StringPiece key) const {
+ return MurmurHash2(key.str_, key.len_);
+ }
+};
+
+}
+#endif
+
+/// A template for hash_maps keyed by a StringPiece whose string is
+/// owned externally (typically by the values). Use like:
+/// ExternalStringHashMap<Foo*>::Type foos; to make foos into a hash
+/// mapping StringPiece => Foo*.
+template<typename V>
+struct ExternalStringHashMap {
+#ifdef _MSC_VER
+ typedef hash_map<StringPiece, V, StringPieceCmp> Type;
+#else
+ typedef hash_map<StringPiece, V> Type;
+#endif
+};
+
+#endif // NINJA_MAP_H_
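
A small usage sketch for ExternalStringHashMap, matching the comment above: the StringPiece key points into storage owned by the value, so the key stays valid as long as the value does. Foo and Add are invented names for illustration.

    // Sketch only; Foo is a hypothetical value type whose name string backs the key.
    #include <string>

    #include "hash_map.h"

    struct Foo {
      std::string name;
    };

    static ExternalStringHashMap<Foo*>::Type foos;

    Foo* Add(const std::string& name) {
      Foo* foo = new Foo;
      foo->name = name;
      // Key the entry by a StringPiece into foo->name, which outlives the entry.
      foos[StringPiece(foo->name)] = foo;
      return foo;
    }
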
diff --git a/ninja/src/includes_normalize-win32.cc b/ninja/src/includes_normalize-win32.cc
new file mode 100644
index 00000000000..05ce75d190c
--- /dev/null
+++ b/ninja/src/includes_normalize-win32.cc
@@ -0,0 +1,115 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "includes_normalize.h"
+
+#include "string_piece.h"
+#include "util.h"
+
+#include <algorithm>
+#include <iterator>
+#include <sstream>
+
+#include <windows.h>
+
+namespace {
+
+/// Return true if paths a and b are on the same Windows drive.
+bool SameDrive(StringPiece a, StringPiece b) {
+ char a_absolute[_MAX_PATH];
+ char b_absolute[_MAX_PATH];
+ GetFullPathName(a.AsString().c_str(), sizeof(a_absolute), a_absolute, NULL);
+ GetFullPathName(b.AsString().c_str(), sizeof(b_absolute), b_absolute, NULL);
+ char a_drive[_MAX_DIR];
+ char b_drive[_MAX_DIR];
+ _splitpath(a_absolute, a_drive, NULL, NULL, NULL);
+ _splitpath(b_absolute, b_drive, NULL, NULL, NULL);
+ return _stricmp(a_drive, b_drive) == 0;
+}
+
+} // anonymous namespace
+
+string IncludesNormalize::Join(const vector<string>& list, char sep) {
+ string ret;
+ for (size_t i = 0; i < list.size(); ++i) {
+ ret += list[i];
+ if (i != list.size() - 1)
+ ret += sep;
+ }
+ return ret;
+}
+
+vector<string> IncludesNormalize::Split(const string& input, char sep) {
+ vector<string> elems;
+ stringstream ss(input);
+ string item;
+ while (getline(ss, item, sep))
+ elems.push_back(item);
+ return elems;
+}
+
+string IncludesNormalize::ToLower(const string& s) {
+ string ret;
+ transform(s.begin(), s.end(), back_inserter(ret), ::tolower);
+ return ret;
+}
+
+string IncludesNormalize::AbsPath(StringPiece s) {
+ char result[_MAX_PATH];
+ GetFullPathName(s.AsString().c_str(), sizeof(result), result, NULL);
+ return result;
+}
+
+string IncludesNormalize::Relativize(StringPiece path, const string& start) {
+ vector<string> start_list = Split(AbsPath(start), '\\');
+ vector<string> path_list = Split(AbsPath(path), '\\');
+ int i;
+ for (i = 0; i < static_cast<int>(min(start_list.size(), path_list.size()));
+ ++i) {
+ if (ToLower(start_list[i]) != ToLower(path_list[i]))
+ break;
+ }
+
+ vector<string> rel_list;
+ for (int j = 0; j < static_cast<int>(start_list.size() - i); ++j)
+ rel_list.push_back("..");
+ for (int j = i; j < static_cast<int>(path_list.size()); ++j)
+ rel_list.push_back(path_list[j]);
+ if (rel_list.size() == 0)
+ return ".";
+ return Join(rel_list, '\\');
+}
+
+string IncludesNormalize::Normalize(const string& input,
+ const char* relative_to) {
+ char copy[_MAX_PATH];
+ size_t len = input.size();
+ strncpy(copy, input.c_str(), input.size() + 1);
+ for (size_t j = 0; j < len; ++j)
+ if (copy[j] == '/')
+ copy[j] = '\\';
+ string err;
+ if (!CanonicalizePath(copy, &len, &err)) {
+ Warning("couldn't canonicalize '%s: %s\n", input.c_str(), err.c_str());
+ }
+ string curdir;
+ if (!relative_to) {
+ curdir = AbsPath(".");
+ relative_to = curdir.c_str();
+ }
+ StringPiece partially_fixed(copy, len);
+ if (!SameDrive(partially_fixed, relative_to))
+ return partially_fixed.AsString();
+ return Relativize(partially_fixed, relative_to);
+}
diff --git a/ninja/src/includes_normalize.h b/ninja/src/includes_normalize.h
new file mode 100644
index 00000000000..43527af9412
--- /dev/null
+++ b/ninja/src/includes_normalize.h
@@ -0,0 +1,35 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include <vector>
+using namespace std;
+
+struct StringPiece;
+
+/// Utility functions for normalizing include paths on Windows.
+/// TODO: this likely duplicates functionality of CanonicalizePath; refactor.
+struct IncludesNormalize {
+ // Internal utilities made available for testing, maybe useful otherwise.
+ static string Join(const vector<string>& list, char sep);
+ static vector<string> Split(const string& input, char sep);
+ static string ToLower(const string& s);
+ static string AbsPath(StringPiece s);
+ static string Relativize(StringPiece path, const string& start);
+
+  /// Normalize a path by fixing the slash style, collapsing redundant .. and .
+  /// components, and making the path relative to |relative_to|. Comparisons are
+  /// case-insensitive on Windows, but the returned path keeps its original case.
+ static string Normalize(const string& input, const char* relative_to);
+};
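
A brief sketch of the behavior declared above, based on the tests later in this patch; exact results for relative inputs depend on the current drive and directory, so treat the commented values as illustrative.

    // Illustrative only; expected values follow includes_normalize_test.cc.
    #include "includes_normalize.h"

    void NormalizeExamples() {
      // Slashes are unified to '\' and redundant components are collapsed.
      string a = IncludesNormalize::Normalize("a\\..\\b", NULL);            // "b"
      // With |relative_to| supplied, the result is made relative to it.
      string b = IncludesNormalize::Normalize("a/b/c", "a/b");              // "c"
      // A path on a different drive is returned as-is (slashes fixed only).
      string c = IncludesNormalize::Normalize("p:\\vs08\\x.h", "c:\\vs08"); // "p:\\vs08\\x.h"
    }
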
diff --git a/ninja/src/includes_normalize_test.cc b/ninja/src/includes_normalize_test.cc
new file mode 100644
index 00000000000..1713d5d516d
--- /dev/null
+++ b/ninja/src/includes_normalize_test.cc
@@ -0,0 +1,104 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "includes_normalize.h"
+
+#include <gtest/gtest.h>
+
+#include "test.h"
+#include "util.h"
+
+TEST(IncludesNormalize, Simple) {
+ EXPECT_EQ("b", IncludesNormalize::Normalize("a\\..\\b", NULL));
+ EXPECT_EQ("b", IncludesNormalize::Normalize("a\\../b", NULL));
+ EXPECT_EQ("a\\b", IncludesNormalize::Normalize("a\\.\\b", NULL));
+ EXPECT_EQ("a\\b", IncludesNormalize::Normalize("a\\./b", NULL));
+}
+
+namespace {
+
+string GetCurDir() {
+ char buf[_MAX_PATH];
+ _getcwd(buf, sizeof(buf));
+ vector<string> parts = IncludesNormalize::Split(string(buf), '\\');
+ return parts[parts.size() - 1];
+}
+
+} // namespace
+
+TEST(IncludesNormalize, WithRelative) {
+ string currentdir = IncludesNormalize::ToLower(GetCurDir());
+ EXPECT_EQ("c", IncludesNormalize::Normalize("a/b/c", "a/b"));
+ EXPECT_EQ("a", IncludesNormalize::Normalize(IncludesNormalize::AbsPath("a"),
+ NULL));
+ EXPECT_EQ(string("..\\") + currentdir + string("\\a"),
+ IncludesNormalize::Normalize("a", "../b"));
+ EXPECT_EQ(string("..\\") + currentdir + string("\\a\\b"),
+ IncludesNormalize::Normalize("a/b", "../c"));
+ EXPECT_EQ("..\\..\\a", IncludesNormalize::Normalize("a", "b/c"));
+ EXPECT_EQ(".", IncludesNormalize::Normalize("a", "a"));
+}
+
+TEST(IncludesNormalize, Case) {
+ EXPECT_EQ("b", IncludesNormalize::Normalize("Abc\\..\\b", NULL));
+ EXPECT_EQ("BdEf", IncludesNormalize::Normalize("Abc\\..\\BdEf", NULL));
+ EXPECT_EQ("A\\b", IncludesNormalize::Normalize("A\\.\\b", NULL));
+ EXPECT_EQ("a\\b", IncludesNormalize::Normalize("a\\./b", NULL));
+ EXPECT_EQ("A\\B", IncludesNormalize::Normalize("A\\.\\B", NULL));
+ EXPECT_EQ("A\\B", IncludesNormalize::Normalize("A\\./B", NULL));
+}
+
+TEST(IncludesNormalize, Join) {
+ vector<string> x;
+ EXPECT_EQ("", IncludesNormalize::Join(x, ':'));
+ x.push_back("alpha");
+ EXPECT_EQ("alpha", IncludesNormalize::Join(x, ':'));
+ x.push_back("beta");
+ x.push_back("gamma");
+ EXPECT_EQ("alpha:beta:gamma", IncludesNormalize::Join(x, ':'));
+}
+
+TEST(IncludesNormalize, Split) {
+ EXPECT_EQ("", IncludesNormalize::Join(IncludesNormalize::Split("", '/'),
+ ':'));
+ EXPECT_EQ("a", IncludesNormalize::Join(IncludesNormalize::Split("a", '/'),
+ ':'));
+ EXPECT_EQ("a:b:c",
+ IncludesNormalize::Join(
+ IncludesNormalize::Split("a/b/c", '/'), ':'));
+}
+
+TEST(IncludesNormalize, ToLower) {
+ EXPECT_EQ("", IncludesNormalize::ToLower(""));
+ EXPECT_EQ("stuff", IncludesNormalize::ToLower("Stuff"));
+ EXPECT_EQ("stuff and things", IncludesNormalize::ToLower("Stuff AND thINGS"));
+ EXPECT_EQ("stuff 3and thin43gs",
+ IncludesNormalize::ToLower("Stuff 3AND thIN43GS"));
+}
+
+TEST(IncludesNormalize, DifferentDrive) {
+ EXPECT_EQ("stuff.h",
+ IncludesNormalize::Normalize("p:\\vs08\\stuff.h", "p:\\vs08"));
+ EXPECT_EQ("stuff.h",
+ IncludesNormalize::Normalize("P:\\Vs08\\stuff.h", "p:\\vs08"));
+ EXPECT_EQ("p:\\vs08\\stuff.h",
+ IncludesNormalize::Normalize("p:\\vs08\\stuff.h", "c:\\vs08"));
+ EXPECT_EQ("P:\\vs08\\stufF.h",
+ IncludesNormalize::Normalize("P:\\vs08\\stufF.h", "D:\\stuff/things"));
+ EXPECT_EQ("P:\\vs08\\stuff.h",
+ IncludesNormalize::Normalize("P:/vs08\\stuff.h", "D:\\stuff/things"));
+ // TODO: this fails; fix it.
+ //EXPECT_EQ("P:\\wee\\stuff.h",
+ // IncludesNormalize::Normalize("P:/vs08\\../wee\\stuff.h", "D:\\stuff/things"));
+}
diff --git a/ninja/src/inline.sh b/ninja/src/inline.sh
new file mode 100755
index 00000000000..5acc17b990f
--- /dev/null
+++ b/ninja/src/inline.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# Copyright 2001 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This quick script converts a text file into an #include-able header.
+# It expects the name of the variable as its first argument, reads the
+# input from stdin, and writes the generated header to stdout.
+
+varname="$1"
+echo "const char $varname[] ="
+od -t x1 -A n -v | sed -e 's| ||g; s|..|\\x&|g; s|^|"|; s|$|"|'
+echo ";"
+
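
For context, a hedged sketch of how a header produced by this script might be consumed from C++; the file name embedded_text.h and the variable kEmbeddedText are invented for illustration (e.g. generated with "./inline.sh kEmbeddedText < notes.txt > embedded_text.h").

    // Sketch only: embedded_text.h is a hypothetical output of inline.sh that
    // defines: const char kEmbeddedText[] = "...";  (the input file, hex-escaped).
    #include <stdio.h>

    #include "embedded_text.h"

    int main() {
      fputs(kEmbeddedText, stdout);  // print the embedded file contents verbatim
      return 0;
    }
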
diff --git a/ninja/src/lexer.cc b/ninja/src/lexer.cc
new file mode 100644
index 00000000000..685fe818fcb
--- /dev/null
+++ b/ninja/src/lexer.cc
@@ -0,0 +1,816 @@
+/* Generated by re2c 0.13.5 */
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lexer.h"
+
+#include <stdio.h>
+
+#include "eval_env.h"
+#include "util.h"
+
+bool Lexer::Error(const string& message, string* err) {
+ // Compute line/column.
+ int line = 1;
+ const char* context = input_.str_;
+ for (const char* p = input_.str_; p < last_token_; ++p) {
+ if (*p == '\n') {
+ ++line;
+ context = p + 1;
+ }
+ }
+ int col = last_token_ ? (int)(last_token_ - context) : 0;
+
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s:%d: ", filename_.AsString().c_str(), line);
+ *err = buf;
+ *err += message + "\n";
+
+ // Add some context to the message.
+ const int kTruncateColumn = 72;
+ if (col > 0 && col < kTruncateColumn) {
+ int len;
+ bool truncated = true;
+ for (len = 0; len < kTruncateColumn; ++len) {
+ if (context[len] == 0 || context[len] == '\n') {
+ truncated = false;
+ break;
+ }
+ }
+ *err += string(context, len);
+ if (truncated)
+ *err += "...";
+ *err += "\n";
+ *err += string(col, ' ');
+ *err += "^ near here";
+ }
+
+ return false;
+}
+
+Lexer::Lexer(const char* input) {
+ Start("input", input);
+}
+
+void Lexer::Start(StringPiece filename, StringPiece input) {
+ filename_ = filename;
+ input_ = input;
+ ofs_ = input_.str_;
+ last_token_ = NULL;
+}
+
+const char* Lexer::TokenName(Token t) {
+ switch (t) {
+ case ERROR: return "lexing error";
+ case BUILD: return "'build'";
+ case COLON: return "':'";
+ case DEFAULT: return "'default'";
+ case EQUALS: return "'='";
+ case IDENT: return "identifier";
+ case INCLUDE: return "'include'";
+ case INDENT: return "indent";
+ case NEWLINE: return "newline";
+ case PIPE2: return "'||'";
+ case PIPE: return "'|'";
+ case POOL: return "'pool'";
+ case RULE: return "'rule'";
+ case SUBNINJA: return "'subninja'";
+ case TEOF: return "eof";
+ }
+ return NULL; // not reached
+}
+
+const char* Lexer::TokenErrorHint(Token expected) {
+ switch (expected) {
+ case COLON:
+ return " ($ also escapes ':')";
+ default:
+ return "";
+ }
+}
+
+string Lexer::DescribeLastError() {
+ if (last_token_) {
+ switch (last_token_[0]) {
+ case '\r':
+ return "carriage returns are not allowed, use newlines";
+ case '\t':
+ return "tabs are not allowed, use spaces";
+ }
+ }
+ return "lexing error";
+}
+
+void Lexer::UnreadToken() {
+ ofs_ = last_token_;
+}
+
+Lexer::Token Lexer::ReadToken() {
+ const char* p = ofs_;
+ const char* q;
+ const char* start;
+ Lexer::Token token;
+ for (;;) {
+ start = p;
+
+{
+ unsigned char yych;
+ unsigned int yyaccept = 0;
+ static const unsigned char yybm[] = {
+ 0, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 0, 64, 64, 0, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 192, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 96, 96, 64,
+ 96, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 64, 64, 64, 64, 64, 64,
+ 64, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 96, 64, 64, 64, 64, 96,
+ 64, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 96, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ };
+
+ yych = *p;
+ if (yych <= '^') {
+ if (yych <= ',') {
+ if (yych <= 0x1F) {
+ if (yych <= 0x00) goto yy22;
+ if (yych == '\n') goto yy6;
+ goto yy24;
+ } else {
+ if (yych <= ' ') goto yy2;
+ if (yych == '#') goto yy4;
+ goto yy24;
+ }
+ } else {
+ if (yych <= ':') {
+ if (yych == '/') goto yy24;
+ if (yych <= '9') goto yy21;
+ goto yy15;
+ } else {
+ if (yych <= '=') {
+ if (yych <= '<') goto yy24;
+ goto yy13;
+ } else {
+ if (yych <= '@') goto yy24;
+ if (yych <= 'Z') goto yy21;
+ goto yy24;
+ }
+ }
+ }
+ } else {
+ if (yych <= 'i') {
+ if (yych <= 'b') {
+ if (yych == '`') goto yy24;
+ if (yych <= 'a') goto yy21;
+ goto yy8;
+ } else {
+ if (yych == 'd') goto yy12;
+ if (yych <= 'h') goto yy21;
+ goto yy19;
+ }
+ } else {
+ if (yych <= 'r') {
+ if (yych == 'p') goto yy10;
+ if (yych <= 'q') goto yy21;
+ goto yy11;
+ } else {
+ if (yych <= 'z') {
+ if (yych <= 's') goto yy20;
+ goto yy21;
+ } else {
+ if (yych == '|') goto yy17;
+ goto yy24;
+ }
+ }
+ }
+ }
+yy2:
+ yyaccept = 0;
+ yych = *(q = ++p);
+ goto yy70;
+yy3:
+ { token = INDENT; break; }
+yy4:
+ yyaccept = 1;
+ yych = *(q = ++p);
+ if (yych <= 0x00) goto yy5;
+ if (yych != '\r') goto yy65;
+yy5:
+ { token = ERROR; break; }
+yy6:
+ ++p;
+yy7:
+ { token = NEWLINE; break; }
+yy8:
+ ++p;
+ if ((yych = *p) == 'u') goto yy59;
+ goto yy26;
+yy9:
+ { token = IDENT; break; }
+yy10:
+ yych = *++p;
+ if (yych == 'o') goto yy55;
+ goto yy26;
+yy11:
+ yych = *++p;
+ if (yych == 'u') goto yy51;
+ goto yy26;
+yy12:
+ yych = *++p;
+ if (yych == 'e') goto yy44;
+ goto yy26;
+yy13:
+ ++p;
+ { token = EQUALS; break; }
+yy15:
+ ++p;
+ { token = COLON; break; }
+yy17:
+ ++p;
+ if ((yych = *p) == '|') goto yy42;
+ { token = PIPE; break; }
+yy19:
+ yych = *++p;
+ if (yych == 'n') goto yy35;
+ goto yy26;
+yy20:
+ yych = *++p;
+ if (yych == 'u') goto yy27;
+ goto yy26;
+yy21:
+ yych = *++p;
+ goto yy26;
+yy22:
+ ++p;
+ { token = TEOF; break; }
+yy24:
+ yych = *++p;
+ goto yy5;
+yy25:
+ ++p;
+ yych = *p;
+yy26:
+ if (yybm[0+yych] & 32) {
+ goto yy25;
+ }
+ goto yy9;
+yy27:
+ yych = *++p;
+ if (yych != 'b') goto yy26;
+ yych = *++p;
+ if (yych != 'n') goto yy26;
+ yych = *++p;
+ if (yych != 'i') goto yy26;
+ yych = *++p;
+ if (yych != 'n') goto yy26;
+ yych = *++p;
+ if (yych != 'j') goto yy26;
+ yych = *++p;
+ if (yych != 'a') goto yy26;
+ ++p;
+ if (yybm[0+(yych = *p)] & 32) {
+ goto yy25;
+ }
+ { token = SUBNINJA; break; }
+yy35:
+ yych = *++p;
+ if (yych != 'c') goto yy26;
+ yych = *++p;
+ if (yych != 'l') goto yy26;
+ yych = *++p;
+ if (yych != 'u') goto yy26;
+ yych = *++p;
+ if (yych != 'd') goto yy26;
+ yych = *++p;
+ if (yych != 'e') goto yy26;
+ ++p;
+ if (yybm[0+(yych = *p)] & 32) {
+ goto yy25;
+ }
+ { token = INCLUDE; break; }
+yy42:
+ ++p;
+ { token = PIPE2; break; }
+yy44:
+ yych = *++p;
+ if (yych != 'f') goto yy26;
+ yych = *++p;
+ if (yych != 'a') goto yy26;
+ yych = *++p;
+ if (yych != 'u') goto yy26;
+ yych = *++p;
+ if (yych != 'l') goto yy26;
+ yych = *++p;
+ if (yych != 't') goto yy26;
+ ++p;
+ if (yybm[0+(yych = *p)] & 32) {
+ goto yy25;
+ }
+ { token = DEFAULT; break; }
+yy51:
+ yych = *++p;
+ if (yych != 'l') goto yy26;
+ yych = *++p;
+ if (yych != 'e') goto yy26;
+ ++p;
+ if (yybm[0+(yych = *p)] & 32) {
+ goto yy25;
+ }
+ { token = RULE; break; }
+yy55:
+ yych = *++p;
+ if (yych != 'o') goto yy26;
+ yych = *++p;
+ if (yych != 'l') goto yy26;
+ ++p;
+ if (yybm[0+(yych = *p)] & 32) {
+ goto yy25;
+ }
+ { token = POOL; break; }
+yy59:
+ yych = *++p;
+ if (yych != 'i') goto yy26;
+ yych = *++p;
+ if (yych != 'l') goto yy26;
+ yych = *++p;
+ if (yych != 'd') goto yy26;
+ ++p;
+ if (yybm[0+(yych = *p)] & 32) {
+ goto yy25;
+ }
+ { token = BUILD; break; }
+yy64:
+ ++p;
+ yych = *p;
+yy65:
+ if (yybm[0+yych] & 64) {
+ goto yy64;
+ }
+ if (yych <= 0x00) goto yy66;
+ if (yych <= '\f') goto yy67;
+yy66:
+ p = q;
+ if (yyaccept <= 0) {
+ goto yy3;
+ } else {
+ goto yy5;
+ }
+yy67:
+ ++p;
+ { continue; }
+yy69:
+ yyaccept = 0;
+ q = ++p;
+ yych = *p;
+yy70:
+ if (yybm[0+yych] & 128) {
+ goto yy69;
+ }
+ if (yych == '\n') goto yy71;
+ if (yych == '#') goto yy64;
+ goto yy3;
+yy71:
+ ++p;
+ yych = *p;
+ goto yy7;
+}
+
+ }
+
+ last_token_ = start;
+ ofs_ = p;
+ if (token != NEWLINE && token != TEOF)
+ EatWhitespace();
+ return token;
+}
+
+bool Lexer::PeekToken(Token token) {
+ Token t = ReadToken();
+ if (t == token)
+ return true;
+ UnreadToken();
+ return false;
+}
+
+void Lexer::EatWhitespace() {
+ const char* p = ofs_;
+ for (;;) {
+ ofs_ = p;
+
+{
+ unsigned char yych;
+ static const unsigned char yybm[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 128, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ yych = *p;
+ if (yych <= ' ') {
+ if (yych <= 0x00) goto yy78;
+ if (yych <= 0x1F) goto yy80;
+ } else {
+ if (yych == '$') goto yy76;
+ goto yy80;
+ }
+ ++p;
+ yych = *p;
+ goto yy84;
+yy75:
+ { continue; }
+yy76:
+ ++p;
+ if ((yych = *p) == '\n') goto yy81;
+yy77:
+ { break; }
+yy78:
+ ++p;
+ { break; }
+yy80:
+ yych = *++p;
+ goto yy77;
+yy81:
+ ++p;
+ { continue; }
+yy83:
+ ++p;
+ yych = *p;
+yy84:
+ if (yybm[0+yych] & 128) {
+ goto yy83;
+ }
+ goto yy75;
+}
+
+ }
+}
+
+bool Lexer::ReadIdent(string* out) {
+ const char* p = ofs_;
+ for (;;) {
+ const char* start = p;
+
+{
+ unsigned char yych;
+ static const unsigned char yybm[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 128, 128, 0,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 0, 0, 0, 0, 0, 0,
+ 0, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 0, 0, 0, 0, 128,
+ 0, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+ yych = *p;
+ if (yych <= '@') {
+ if (yych <= '.') {
+ if (yych <= ',') goto yy89;
+ } else {
+ if (yych <= '/') goto yy89;
+ if (yych >= ':') goto yy89;
+ }
+ } else {
+ if (yych <= '_') {
+ if (yych <= 'Z') goto yy87;
+ if (yych <= '^') goto yy89;
+ } else {
+ if (yych <= '`') goto yy89;
+ if (yych >= '{') goto yy89;
+ }
+ }
+yy87:
+ ++p;
+ yych = *p;
+ goto yy92;
+yy88:
+ {
+ out->assign(start, p - start);
+ break;
+ }
+yy89:
+ ++p;
+ { return false; }
+yy91:
+ ++p;
+ yych = *p;
+yy92:
+ if (yybm[0+yych] & 128) {
+ goto yy91;
+ }
+ goto yy88;
+}
+
+ }
+ ofs_ = p;
+ EatWhitespace();
+ return true;
+}
+
+bool Lexer::ReadEvalString(EvalString* eval, bool path, string* err) {
+ const char* p = ofs_;
+ const char* q;
+ const char* start;
+ for (;;) {
+ start = p;
+
+{
+ unsigned char yych;
+ static const unsigned char yybm[] = {
+ 0, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 0, 128, 128, 0, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 16, 128, 128, 128, 0, 128, 128, 128,
+ 128, 128, 128, 128, 128, 224, 160, 128,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 0, 128, 128, 128, 128, 128,
+ 128, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 224, 128, 128, 128, 128, 224,
+ 128, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 224, 224, 224, 128, 0, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128,
+ };
+ yych = *p;
+ if (yych <= ' ') {
+ if (yych <= '\n') {
+ if (yych <= 0x00) goto yy101;
+ if (yych >= '\n') goto yy97;
+ } else {
+ if (yych == '\r') goto yy103;
+ if (yych >= ' ') goto yy97;
+ }
+ } else {
+ if (yych <= '9') {
+ if (yych == '$') goto yy99;
+ } else {
+ if (yych <= ':') goto yy97;
+ if (yych == '|') goto yy97;
+ }
+ }
+ ++p;
+ yych = *p;
+ goto yy126;
+yy96:
+ {
+ eval->AddText(StringPiece(start, p - start));
+ continue;
+ }
+yy97:
+ ++p;
+ {
+ if (path) {
+ p = start;
+ break;
+ } else {
+ if (*start == '\n')
+ break;
+ eval->AddText(StringPiece(start, 1));
+ continue;
+ }
+ }
+yy99:
+ ++p;
+ if ((yych = *p) <= '/') {
+ if (yych <= ' ') {
+ if (yych == '\n') goto yy115;
+ if (yych <= 0x1F) goto yy104;
+ goto yy106;
+ } else {
+ if (yych <= '$') {
+ if (yych <= '#') goto yy104;
+ goto yy108;
+ } else {
+ if (yych == '-') goto yy110;
+ goto yy104;
+ }
+ }
+ } else {
+ if (yych <= '^') {
+ if (yych <= ':') {
+ if (yych <= '9') goto yy110;
+ goto yy112;
+ } else {
+ if (yych <= '@') goto yy104;
+ if (yych <= 'Z') goto yy110;
+ goto yy104;
+ }
+ } else {
+ if (yych <= '`') {
+ if (yych <= '_') goto yy110;
+ goto yy104;
+ } else {
+ if (yych <= 'z') goto yy110;
+ if (yych <= '{') goto yy114;
+ goto yy104;
+ }
+ }
+ }
+yy100:
+ {
+ last_token_ = start;
+ return Error(DescribeLastError(), err);
+ }
+yy101:
+ ++p;
+ {
+ last_token_ = start;
+ return Error("unexpected EOF", err);
+ }
+yy103:
+ yych = *++p;
+ goto yy100;
+yy104:
+ ++p;
+yy105:
+ {
+ last_token_ = start;
+ return Error("bad $-escape (literal $ must be written as $$)", err);
+ }
+yy106:
+ ++p;
+ {
+ eval->AddText(StringPiece(" ", 1));
+ continue;
+ }
+yy108:
+ ++p;
+ {
+ eval->AddText(StringPiece("$", 1));
+ continue;
+ }
+yy110:
+ ++p;
+ yych = *p;
+ goto yy124;
+yy111:
+ {
+ eval->AddSpecial(StringPiece(start + 1, p - start - 1));
+ continue;
+ }
+yy112:
+ ++p;
+ {
+ eval->AddText(StringPiece(":", 1));
+ continue;
+ }
+yy114:
+ yych = *(q = ++p);
+ if (yybm[0+yych] & 32) {
+ goto yy118;
+ }
+ goto yy105;
+yy115:
+ ++p;
+ yych = *p;
+ if (yybm[0+yych] & 16) {
+ goto yy115;
+ }
+ {
+ continue;
+ }
+yy118:
+ ++p;
+ yych = *p;
+ if (yybm[0+yych] & 32) {
+ goto yy118;
+ }
+ if (yych == '}') goto yy121;
+ p = q;
+ goto yy105;
+yy121:
+ ++p;
+ {
+ eval->AddSpecial(StringPiece(start + 2, p - start - 3));
+ continue;
+ }
+yy123:
+ ++p;
+ yych = *p;
+yy124:
+ if (yybm[0+yych] & 64) {
+ goto yy123;
+ }
+ goto yy111;
+yy125:
+ ++p;
+ yych = *p;
+yy126:
+ if (yybm[0+yych] & 128) {
+ goto yy125;
+ }
+ goto yy96;
+}
+
+ }
+ last_token_ = start;
+ ofs_ = p;
+ if (path)
+ EatWhitespace();
+ // Non-path strings end in newlines, so there's no whitespace to eat.
+ return true;
+}
diff --git a/ninja/src/lexer.h b/ninja/src/lexer.h
new file mode 100644
index 00000000000..f366556afc5
--- /dev/null
+++ b/ninja/src/lexer.h
@@ -0,0 +1,105 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_LEXER_H_
+#define NINJA_LEXER_H_
+
+#include "string_piece.h"
+
+// Windows may #define ERROR.
+#ifdef ERROR
+#undef ERROR
+#endif
+
+struct EvalString;
+
+struct Lexer {
+ Lexer() {}
+ /// Helper ctor useful for tests.
+ explicit Lexer(const char* input);
+
+ enum Token {
+ ERROR,
+ BUILD,
+ COLON,
+ DEFAULT,
+ EQUALS,
+ IDENT,
+ INCLUDE,
+ INDENT,
+ NEWLINE,
+ PIPE,
+ PIPE2,
+ POOL,
+ RULE,
+ SUBNINJA,
+ TEOF,
+ };
+
+ /// Return a human-readable form of a token, used in error messages.
+ static const char* TokenName(Token t);
+
+ /// Return a human-readable token hint, used in error messages.
+ static const char* TokenErrorHint(Token expected);
+
+ /// If the last token read was an ERROR token, provide more info
+ /// or the empty string.
+ string DescribeLastError();
+
+ /// Start parsing some input.
+ void Start(StringPiece filename, StringPiece input);
+
+ /// Read a Token from the Token enum.
+ Token ReadToken();
+
+ /// Rewind to the last read Token.
+ void UnreadToken();
+
+ /// If the next token is \a token, read it and return true.
+ bool PeekToken(Token token);
+
+ /// Read a simple identifier (a rule or variable name).
+ /// Returns false if a name can't be read.
+ bool ReadIdent(string* out);
+
+ /// Read a path (complete with $escapes).
+  /// Returns false only on error; the returned path may be empty if a
+  /// delimiter (space, newline) is hit.
+ bool ReadPath(EvalString* path, string* err) {
+ return ReadEvalString(path, true, err);
+ }
+
+ /// Read the value side of a var = value line (complete with $escapes).
+ /// Returns false only on error.
+ bool ReadVarValue(EvalString* value, string* err) {
+ return ReadEvalString(value, false, err);
+ }
+
+ /// Construct an error message with context.
+ bool Error(const string& message, string* err);
+
+private:
+ /// Skip past whitespace (called after each read token/ident/etc.).
+ void EatWhitespace();
+
+ /// Read a $-escaped string.
+ bool ReadEvalString(EvalString* eval, bool path, string* err);
+
+ StringPiece filename_;
+ StringPiece input_;
+ const char* ofs_;
+ const char* last_token_;
+};
+
+#endif // NINJA_LEXER_H_
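
A short sketch of driving the Lexer API above, modeled on ManifestParser::ParseLet and the lexer tests later in this patch; the input manifest fragment and the function name are invented for illustration.

    // Sketch only; uses the test-only Lexer(const char*) constructor.
    #include "lexer.h"

    #include "eval_env.h"

    bool ReadOneBinding(string* name, EvalString* value, string* err) {
      Lexer lexer("cflags = -O2 $extra\n");
      if (!lexer.ReadIdent(name))            // reads "cflags"
        return lexer.Error("expected identifier", err);
      if (!lexer.PeekToken(Lexer::EQUALS))   // consumes '=' if it is next
        return lexer.Error("expected '='", err);
      return lexer.ReadVarValue(value, err); // reads "-O2 $extra", honoring $-escapes
    }
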
diff --git a/ninja/src/lexer.in.cc b/ninja/src/lexer.in.cc
new file mode 100644
index 00000000000..93d5540c874
--- /dev/null
+++ b/ninja/src/lexer.in.cc
@@ -0,0 +1,264 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lexer.h"
+
+#include <stdio.h>
+
+#include "eval_env.h"
+#include "util.h"
+
+bool Lexer::Error(const string& message, string* err) {
+ // Compute line/column.
+ int line = 1;
+ const char* context = input_.str_;
+ for (const char* p = input_.str_; p < last_token_; ++p) {
+ if (*p == '\n') {
+ ++line;
+ context = p + 1;
+ }
+ }
+ int col = last_token_ ? (int)(last_token_ - context) : 0;
+
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s:%d: ", filename_.AsString().c_str(), line);
+ *err = buf;
+ *err += message + "\n";
+
+ // Add some context to the message.
+ const int kTruncateColumn = 72;
+ if (col > 0 && col < kTruncateColumn) {
+ int len;
+ bool truncated = true;
+ for (len = 0; len < kTruncateColumn; ++len) {
+ if (context[len] == 0 || context[len] == '\n') {
+ truncated = false;
+ break;
+ }
+ }
+ *err += string(context, len);
+ if (truncated)
+ *err += "...";
+ *err += "\n";
+ *err += string(col, ' ');
+ *err += "^ near here";
+ }
+
+ return false;
+}
+
+Lexer::Lexer(const char* input) {
+ Start("input", input);
+}
+
+void Lexer::Start(StringPiece filename, StringPiece input) {
+ filename_ = filename;
+ input_ = input;
+ ofs_ = input_.str_;
+ last_token_ = NULL;
+}
+
+const char* Lexer::TokenName(Token t) {
+ switch (t) {
+ case ERROR: return "lexing error";
+ case BUILD: return "'build'";
+ case COLON: return "':'";
+ case DEFAULT: return "'default'";
+ case EQUALS: return "'='";
+ case IDENT: return "identifier";
+ case INCLUDE: return "'include'";
+ case INDENT: return "indent";
+ case NEWLINE: return "newline";
+ case PIPE2: return "'||'";
+ case PIPE: return "'|'";
+ case POOL: return "'pool'";
+ case RULE: return "'rule'";
+ case SUBNINJA: return "'subninja'";
+ case TEOF: return "eof";
+ }
+ return NULL; // not reached
+}
+
+const char* Lexer::TokenErrorHint(Token expected) {
+ switch (expected) {
+ case COLON:
+ return " ($ also escapes ':')";
+ default:
+ return "";
+ }
+}
+
+string Lexer::DescribeLastError() {
+ if (last_token_) {
+ switch (last_token_[0]) {
+ case '\r':
+ return "carriage returns are not allowed, use newlines";
+ case '\t':
+ return "tabs are not allowed, use spaces";
+ }
+ }
+ return "lexing error";
+}
+
+void Lexer::UnreadToken() {
+ ofs_ = last_token_;
+}
+
+Lexer::Token Lexer::ReadToken() {
+ const char* p = ofs_;
+ const char* q;
+ const char* start;
+ Lexer::Token token;
+ for (;;) {
+ start = p;
+ /*!re2c
+ re2c:define:YYCTYPE = "unsigned char";
+ re2c:define:YYCURSOR = p;
+ re2c:define:YYMARKER = q;
+ re2c:yyfill:enable = 0;
+
+ nul = "\000";
+ simple_varname = [a-zA-Z0-9_-]+;
+ varname = [a-zA-Z0-9_.-]+;
+
+ [ ]*"#"[^\000\r\n]*"\n" { continue; }
+ [ ]*[\n] { token = NEWLINE; break; }
+ [ ]+ { token = INDENT; break; }
+ "build" { token = BUILD; break; }
+ "pool" { token = POOL; break; }
+ "rule" { token = RULE; break; }
+ "default" { token = DEFAULT; break; }
+ "=" { token = EQUALS; break; }
+ ":" { token = COLON; break; }
+ "||" { token = PIPE2; break; }
+ "|" { token = PIPE; break; }
+ "include" { token = INCLUDE; break; }
+ "subninja" { token = SUBNINJA; break; }
+ varname { token = IDENT; break; }
+ nul { token = TEOF; break; }
+ [^] { token = ERROR; break; }
+ */
+ }
+
+ last_token_ = start;
+ ofs_ = p;
+ if (token != NEWLINE && token != TEOF)
+ EatWhitespace();
+ return token;
+}
+
+bool Lexer::PeekToken(Token token) {
+ Token t = ReadToken();
+ if (t == token)
+ return true;
+ UnreadToken();
+ return false;
+}
+
+void Lexer::EatWhitespace() {
+ const char* p = ofs_;
+ for (;;) {
+ ofs_ = p;
+ /*!re2c
+ [ ]+ { continue; }
+ "$\n" { continue; }
+ nul { break; }
+ [^] { break; }
+ */
+ }
+}
+
+bool Lexer::ReadIdent(string* out) {
+ const char* p = ofs_;
+ for (;;) {
+ const char* start = p;
+ /*!re2c
+ varname {
+ out->assign(start, p - start);
+ break;
+ }
+ [^] { return false; }
+ */
+ }
+ ofs_ = p;
+ EatWhitespace();
+ return true;
+}
+
+bool Lexer::ReadEvalString(EvalString* eval, bool path, string* err) {
+ const char* p = ofs_;
+ const char* q;
+ const char* start;
+ for (;;) {
+ start = p;
+ /*!re2c
+ [^$ :\r\n|\000]+ {
+ eval->AddText(StringPiece(start, p - start));
+ continue;
+ }
+ [ :|\n] {
+ if (path) {
+ p = start;
+ break;
+ } else {
+ if (*start == '\n')
+ break;
+ eval->AddText(StringPiece(start, 1));
+ continue;
+ }
+ }
+ "$$" {
+ eval->AddText(StringPiece("$", 1));
+ continue;
+ }
+ "$ " {
+ eval->AddText(StringPiece(" ", 1));
+ continue;
+ }
+ "$\n"[ ]* {
+ continue;
+ }
+ "${"varname"}" {
+ eval->AddSpecial(StringPiece(start + 2, p - start - 3));
+ continue;
+ }
+ "$"simple_varname {
+ eval->AddSpecial(StringPiece(start + 1, p - start - 1));
+ continue;
+ }
+ "$:" {
+ eval->AddText(StringPiece(":", 1));
+ continue;
+ }
+ "$". {
+ last_token_ = start;
+ return Error("bad $-escape (literal $ must be written as $$)", err);
+ }
+ nul {
+ last_token_ = start;
+ return Error("unexpected EOF", err);
+ }
+ [^] {
+ last_token_ = start;
+ return Error(DescribeLastError(), err);
+ }
+ */
+ }
+ last_token_ = start;
+ ofs_ = p;
+ if (path)
+ EatWhitespace();
+ // Non-path strings end in newlines, so there's no whitespace to eat.
+ return true;
+}
diff --git a/ninja/src/lexer_test.cc b/ninja/src/lexer_test.cc
new file mode 100644
index 00000000000..e8a164254e5
--- /dev/null
+++ b/ninja/src/lexer_test.cc
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lexer.h"
+
+#include <gtest/gtest.h>
+
+#include "eval_env.h"
+
+TEST(Lexer, ReadVarValue) {
+ Lexer lexer("plain text $var $VaR ${x}\n");
+ EvalString eval;
+ string err;
+ EXPECT_TRUE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ("[plain text ][$var][ ][$VaR][ ][$x]",
+ eval.Serialize());
+}
+
+TEST(Lexer, ReadEvalStringEscapes) {
+ Lexer lexer("$ $$ab c$: $\ncde\n");
+ EvalString eval;
+ string err;
+ EXPECT_TRUE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ("[ $ab c: cde]",
+ eval.Serialize());
+}
+
+TEST(Lexer, ReadIdent) {
+ Lexer lexer("foo baR baz_123 foo-bar");
+ string ident;
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("foo", ident);
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("baR", ident);
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("baz_123", ident);
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("foo-bar", ident);
+}
+
+TEST(Lexer, ReadIdentCurlies) {
+ // Verify that ReadIdent includes dots in the name,
+ // but in an expansion $bar.dots stops at the dot.
+ Lexer lexer("foo.dots $bar.dots ${bar.dots}\n");
+ string ident;
+ EXPECT_TRUE(lexer.ReadIdent(&ident));
+ EXPECT_EQ("foo.dots", ident);
+
+ EvalString eval;
+ string err;
+ EXPECT_TRUE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ("[$bar][.dots ][$bar.dots]",
+ eval.Serialize());
+}
+
+TEST(Lexer, Error) {
+ Lexer lexer("foo$\nbad $");
+ EvalString eval;
+ string err;
+ ASSERT_FALSE(lexer.ReadVarValue(&eval, &err));
+ EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n"
+ "bad $\n"
+ " ^ near here"
+ , err);
+}
+
+TEST(Lexer, CommentEOF) {
+ // Verify we don't run off the end of the string when the EOF is
+ // mid-comment.
+ Lexer lexer("# foo");
+ Lexer::Token token = lexer.ReadToken();
+ EXPECT_EQ(Lexer::ERROR, token);
+}
+
+TEST(Lexer, Tabs) {
+ // Verify we print a useful error on a disallowed character.
+ Lexer lexer(" \tfoobar");
+ Lexer::Token token = lexer.ReadToken();
+ EXPECT_EQ(Lexer::INDENT, token);
+ token = lexer.ReadToken();
+ EXPECT_EQ(Lexer::ERROR, token);
+ EXPECT_EQ("tabs are not allowed, use spaces", lexer.DescribeLastError());
+}
diff --git a/ninja/src/line_printer.cc b/ninja/src/line_printer.cc
new file mode 100644
index 00000000000..a75eb0505f4
--- /dev/null
+++ b/ninja/src/line_printer.cc
@@ -0,0 +1,109 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "line_printer.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#endif
+
+#include "util.h"
+
+LinePrinter::LinePrinter() : have_blank_line_(true) {
+#ifndef _WIN32
+ const char* term = getenv("TERM");
+ smart_terminal_ = isatty(1) && term && string(term) != "dumb";
+#else
+ // Disable output buffer. It'd be nice to use line buffering but
+ // MSDN says: "For some systems, [_IOLBF] provides line
+ // buffering. However, for Win32, the behavior is the same as _IOFBF
+ // - Full Buffering."
+ setvbuf(stdout, NULL, _IONBF, 0);
+ console_ = GetStdHandle(STD_OUTPUT_HANDLE);
+ CONSOLE_SCREEN_BUFFER_INFO csbi;
+ smart_terminal_ = GetConsoleScreenBufferInfo(console_, &csbi);
+#endif
+}
+
+void LinePrinter::Print(string to_print, LineType type) {
+#ifdef _WIN32
+ CONSOLE_SCREEN_BUFFER_INFO csbi;
+ GetConsoleScreenBufferInfo(console_, &csbi);
+#endif
+
+ if (smart_terminal_) {
+#ifndef _WIN32
+ printf("\r"); // Print over previous line, if any.
+#else
+ csbi.dwCursorPosition.X = 0;
+ SetConsoleCursorPosition(console_, csbi.dwCursorPosition);
+#endif
+ }
+
+ if (smart_terminal_ && type == ELIDE) {
+#ifdef _WIN32
+ // Don't use the full width or console will move to next line.
+ size_t width = static_cast<size_t>(csbi.dwSize.X) - 1;
+ to_print = ElideMiddle(to_print, width);
+ // We don't want to have the cursor spamming back and forth, so
+ // use WriteConsoleOutput instead which updates the contents of
+ // the buffer, but doesn't move the cursor position.
+ GetConsoleScreenBufferInfo(console_, &csbi);
+ COORD buf_size = { csbi.dwSize.X, 1 };
+ COORD zero_zero = { 0, 0 };
+ SMALL_RECT target = {
+ csbi.dwCursorPosition.X, csbi.dwCursorPosition.Y,
+ static_cast<SHORT>(csbi.dwCursorPosition.X + csbi.dwSize.X - 1),
+ csbi.dwCursorPosition.Y
+ };
+ CHAR_INFO* char_data = new CHAR_INFO[csbi.dwSize.X];
+ memset(char_data, 0, sizeof(CHAR_INFO) * csbi.dwSize.X);
+ for (int i = 0; i < csbi.dwSize.X; ++i) {
+ char_data[i].Char.AsciiChar = ' ';
+ char_data[i].Attributes = csbi.wAttributes;
+ }
+ for (size_t i = 0; i < to_print.size(); ++i)
+ char_data[i].Char.AsciiChar = to_print[i];
+ WriteConsoleOutput(console_, char_data, buf_size, zero_zero, &target);
+ delete[] char_data;
+#else
+ // Limit output to width of the terminal if provided so we don't cause
+ // line-wrapping.
+ winsize size;
+ if ((ioctl(0, TIOCGWINSZ, &size) == 0) && size.ws_col) {
+ to_print = ElideMiddle(to_print, size.ws_col);
+ }
+ printf("%s", to_print.c_str());
+ printf("\x1B[K"); // Clear to end of line.
+ fflush(stdout);
+#endif
+
+ have_blank_line_ = false;
+ } else {
+ printf("%s\n", to_print.c_str());
+ }
+}
+
+void LinePrinter::PrintOnNewLine(const string& to_print) {
+ if (!have_blank_line_)
+ printf("\n");
+ printf("%s", to_print.c_str());
+ have_blank_line_ = to_print.empty() || *to_print.rbegin() == '\n';
+}
diff --git a/ninja/src/line_printer.h b/ninja/src/line_printer.h
new file mode 100644
index 00000000000..aea28172729
--- /dev/null
+++ b/ninja/src/line_printer.h
@@ -0,0 +1,52 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_LINE_PRINTER_H_
+#define NINJA_LINE_PRINTER_H_
+
+#include <string>
+using namespace std;
+
+/// Prints lines of text, possibly overprinting previously printed lines
+/// if the terminal supports it.
+struct LinePrinter {
+ LinePrinter();
+
+ bool is_smart_terminal() const { return smart_terminal_; }
+ void set_smart_terminal(bool smart) { smart_terminal_ = smart; }
+
+ enum LineType {
+ FULL,
+ ELIDE
+ };
+ /// Overprints the current line. If type is ELIDE, elides to_print to fit on
+ /// one line.
+ void Print(string to_print, LineType type);
+
+ /// Prints a string on a new line, not overprinting previous output.
+ void PrintOnNewLine(const string& to_print);
+
+ private:
+ /// Whether we can do fancy terminal control codes.
+ bool smart_terminal_;
+
+ /// Whether the caret is at the beginning of a blank line.
+ bool have_blank_line_;
+
+#ifdef _WIN32
+ void* console_;
+#endif
+};
+
+#endif // NINJA_LINE_PRINTER_H_
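
A minimal sketch of the LinePrinter interface declared above, written as a build-status style sequence; the status strings are placeholders. ELIDE lines overprint each other on smart terminals, while PrintOnNewLine and FULL leave permanent output.

    // Sketch only; messages are invented.
    #include "line_printer.h"

    void ShowProgress() {
      LinePrinter printer;
      printer.Print("[1/3] compiling foo.cc", LinePrinter::ELIDE);   // may be overprinted
      printer.Print("[2/3] compiling bar.cc", LinePrinter::ELIDE);   // replaces the line above
      printer.PrintOnNewLine("warning: worth keeping\n");            // permanent output
      printer.Print("[3/3] linking app", LinePrinter::FULL);         // printed with a newline
    }
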
diff --git a/ninja/src/manifest_parser.cc b/ninja/src/manifest_parser.cc
new file mode 100644
index 00000000000..d742331bb76
--- /dev/null
+++ b/ninja/src/manifest_parser.cc
@@ -0,0 +1,379 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "manifest_parser.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include "graph.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+#include "version.h"
+
+ManifestParser::ManifestParser(State* state, FileReader* file_reader)
+ : state_(state), file_reader_(file_reader) {
+ env_ = &state->bindings_;
+}
+bool ManifestParser::Load(const string& filename, string* err) {
+ string contents;
+ string read_err;
+ if (!file_reader_->ReadFile(filename, &contents, &read_err)) {
+ *err = "loading '" + filename + "': " + read_err;
+ return false;
+ }
+  // Pad the end of the buffer with NULs; the re2c-generated lexer can look a
+  // few bytes past the last token, and the padding keeps those reads in bounds.
+  contents.resize(contents.size() + 10);
+ return Parse(filename, contents, err);
+}
+
+bool ManifestParser::Parse(const string& filename, const string& input,
+ string* err) {
+ METRIC_RECORD(".ninja parse");
+ lexer_.Start(filename, input);
+
+ for (;;) {
+ Lexer::Token token = lexer_.ReadToken();
+ switch (token) {
+ case Lexer::POOL:
+ if (!ParsePool(err))
+ return false;
+ break;
+ case Lexer::BUILD:
+ if (!ParseEdge(err))
+ return false;
+ break;
+ case Lexer::RULE:
+ if (!ParseRule(err))
+ return false;
+ break;
+ case Lexer::DEFAULT:
+ if (!ParseDefault(err))
+ return false;
+ break;
+ case Lexer::IDENT: {
+ lexer_.UnreadToken();
+ string name;
+ EvalString let_value;
+ if (!ParseLet(&name, &let_value, err))
+ return false;
+ string value = let_value.Evaluate(env_);
+ // Check ninja_required_version immediately so we can exit
+ // before encountering any syntactic surprises.
+ if (name == "ninja_required_version")
+ CheckNinjaVersion(value);
+ env_->AddBinding(name, value);
+ break;
+ }
+ case Lexer::INCLUDE:
+ if (!ParseFileInclude(false, err))
+ return false;
+ break;
+ case Lexer::SUBNINJA:
+ if (!ParseFileInclude(true, err))
+ return false;
+ break;
+ case Lexer::ERROR: {
+ return lexer_.Error(lexer_.DescribeLastError(), err);
+ }
+ case Lexer::TEOF:
+ return true;
+ case Lexer::NEWLINE:
+ break;
+ default:
+ return lexer_.Error(string("unexpected ") + Lexer::TokenName(token),
+ err);
+ }
+ }
+ return false; // not reached
+}
+
+
+bool ManifestParser::ParsePool(string* err) {
+ string name;
+ if (!lexer_.ReadIdent(&name))
+ return lexer_.Error("expected pool name", err);
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ if (state_->LookupPool(name) != NULL)
+ return lexer_.Error("duplicate pool '" + name + "'", err);
+
+ int depth = -1;
+
+ while (lexer_.PeekToken(Lexer::INDENT)) {
+ string key;
+ EvalString value;
+ if (!ParseLet(&key, &value, err))
+ return false;
+
+ if (key == "depth") {
+ string depth_string = value.Evaluate(env_);
+ depth = atol(depth_string.c_str());
+ if (depth < 0)
+ return lexer_.Error("invalid pool depth", err);
+ } else {
+ return lexer_.Error("unexpected variable '" + key + "'", err);
+ }
+ }
+
+ if (depth < 0)
+ return lexer_.Error("expected 'depth =' line", err);
+
+ state_->AddPool(new Pool(name, depth));
+ return true;
+}
+
+
+bool ManifestParser::ParseRule(string* err) {
+ string name;
+ if (!lexer_.ReadIdent(&name))
+ return lexer_.Error("expected rule name", err);
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ if (state_->LookupRule(name) != NULL) {
+ *err = "duplicate rule '" + name + "'";
+ return false;
+ }
+
+ Rule* rule = new Rule(name); // XXX scoped_ptr
+
+ while (lexer_.PeekToken(Lexer::INDENT)) {
+ string key;
+ EvalString value;
+ if (!ParseLet(&key, &value, err))
+ return false;
+
+ if (Rule::IsReservedBinding(key)) {
+ rule->AddBinding(key, value);
+ } else {
+ // Die on other keyvals for now; revisit if we want to add a
+ // scope here.
+ return lexer_.Error("unexpected variable '" + key + "'", err);
+ }
+ }
+
+ if (rule->bindings_["rspfile"].empty() !=
+ rule->bindings_["rspfile_content"].empty()) {
+ return lexer_.Error("rspfile and rspfile_content need to be "
+ "both specified", err);
+ }
+
+ if (rule->bindings_["command"].empty())
+ return lexer_.Error("expected 'command =' line", err);
+
+ state_->AddRule(rule);
+ return true;
+}
+
+bool ManifestParser::ParseLet(string* key, EvalString* value, string* err) {
+ if (!lexer_.ReadIdent(key))
+ return false;
+ if (!ExpectToken(Lexer::EQUALS, err))
+ return false;
+ if (!lexer_.ReadVarValue(value, err))
+ return false;
+ return true;
+}
+
+bool ManifestParser::ParseDefault(string* err) {
+ EvalString eval;
+ if (!lexer_.ReadPath(&eval, err))
+ return false;
+ if (eval.empty())
+ return lexer_.Error("expected target name", err);
+
+ do {
+ string path = eval.Evaluate(env_);
+ string path_err;
+ if (!CanonicalizePath(&path, &path_err))
+ return lexer_.Error(path_err, err);
+ if (!state_->AddDefault(path, &path_err))
+ return lexer_.Error(path_err, err);
+
+ eval.Clear();
+ if (!lexer_.ReadPath(&eval, err))
+ return false;
+ } while (!eval.empty());
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ return true;
+}
+
+bool ManifestParser::ParseEdge(string* err) {
+ vector<EvalString> ins, outs;
+
+ {
+ EvalString out;
+ if (!lexer_.ReadPath(&out, err))
+ return false;
+ if (out.empty())
+ return lexer_.Error("expected path", err);
+
+ do {
+ outs.push_back(out);
+
+ out.Clear();
+ if (!lexer_.ReadPath(&out, err))
+ return false;
+ } while (!out.empty());
+ }
+
+ if (!ExpectToken(Lexer::COLON, err))
+ return false;
+
+ string rule_name;
+ if (!lexer_.ReadIdent(&rule_name))
+ return lexer_.Error("expected build command name", err);
+
+ const Rule* rule = state_->LookupRule(rule_name);
+ if (!rule)
+ return lexer_.Error("unknown build rule '" + rule_name + "'", err);
+
+ for (;;) {
+ // XXX should we require one path here?
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ }
+
+ // Add all implicit deps, counting how many as we go.
+ int implicit = 0;
+ if (lexer_.PeekToken(Lexer::PIPE)) {
+ for (;;) {
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ ++implicit;
+ }
+ }
+
+ // Add all order-only deps, counting how many as we go.
+ int order_only = 0;
+ if (lexer_.PeekToken(Lexer::PIPE2)) {
+ for (;;) {
+ EvalString in;
+ if (!lexer_.ReadPath(&in, err))
+ return false;
+ if (in.empty())
+ break;
+ ins.push_back(in);
+ ++order_only;
+ }
+ }
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ // XXX scoped_ptr to handle error case.
+ BindingEnv* env = new BindingEnv(env_);
+
+ while (lexer_.PeekToken(Lexer::INDENT)) {
+ string key;
+ EvalString val;
+ if (!ParseLet(&key, &val, err))
+ return false;
+
+ env->AddBinding(key, val.Evaluate(env_));
+ }
+
+ Edge* edge = state_->AddEdge(rule);
+ edge->env_ = env;
+
+ string pool_name = edge->GetBinding("pool");
+ if (!pool_name.empty()) {
+ Pool* pool = state_->LookupPool(pool_name);
+ if (pool == NULL)
+ return lexer_.Error("unknown pool name", err);
+ edge->pool_ = pool;
+ }
+
+ for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
+ string path = i->Evaluate(env);
+ string path_err;
+ if (!CanonicalizePath(&path, &path_err))
+ return lexer_.Error(path_err, err);
+ state_->AddIn(edge, path);
+ }
+ for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
+ string path = i->Evaluate(env);
+ string path_err;
+ if (!CanonicalizePath(&path, &path_err))
+ return lexer_.Error(path_err, err);
+ state_->AddOut(edge, path);
+ }
+ edge->implicit_deps_ = implicit;
+ edge->order_only_deps_ = order_only;
+
+ // Multiple outputs aren't (yet?) supported with depslog.
+ string deps_type = edge->GetBinding("deps");
+ if (!deps_type.empty() && edge->outputs_.size() > 1) {
+ return lexer_.Error("multiple outputs aren't (yet?) supported by depslog; "
+ "bring this up on the mailing list if it affects you",
+ err);
+ }
+
+ return true;
+}
+
+bool ManifestParser::ParseFileInclude(bool new_scope, string* err) {
+ // Read the path of the file to include/subninja (it may contain $vars).
+ EvalString eval;
+ if (!lexer_.ReadPath(&eval, err))
+ return false;
+ string path = eval.Evaluate(env_);
+
+ string contents;
+ string read_err;
+ if (!file_reader_->ReadFile(path, &contents, &read_err))
+ return lexer_.Error("loading '" + path + "': " + read_err, err);
+
+ ManifestParser subparser(state_, file_reader_);
+ if (new_scope) {
+ subparser.env_ = new BindingEnv(env_);
+ } else {
+ subparser.env_ = env_;
+ }
+
+ if (!subparser.Parse(path, contents, err))
+ return false;
+
+ if (!ExpectToken(Lexer::NEWLINE, err))
+ return false;
+
+ return true;
+}
+
+bool ManifestParser::ExpectToken(Lexer::Token expected, string* err) {
+ Lexer::Token token = lexer_.ReadToken();
+ if (token != expected) {
+ string message = string("expected ") + Lexer::TokenName(expected);
+ message += string(", got ") + Lexer::TokenName(token);
+ message += Lexer::TokenErrorHint(expected);
+ return lexer_.Error(message, err);
+ }
+ return true;
+}
diff --git a/ninja/src/manifest_parser.h b/ninja/src/manifest_parser.h
new file mode 100644
index 00000000000..967dfddc306
--- /dev/null
+++ b/ninja/src/manifest_parser.h
@@ -0,0 +1,69 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_MANIFEST_PARSER_H_
+#define NINJA_MANIFEST_PARSER_H_
+
+#include <string>
+
+using namespace std;
+
+#include "lexer.h"
+
+struct BindingEnv;
+struct EvalString;
+struct State;
+
+/// Parses .ninja files.
+struct ManifestParser {
+ struct FileReader {
+ virtual ~FileReader() {}
+ virtual bool ReadFile(const string& path, string* content, string* err) = 0;
+ };
+
+ ManifestParser(State* state, FileReader* file_reader);
+
+ /// Load and parse a file.
+ bool Load(const string& filename, string* err);
+
+ /// Parse a text string of input. Used by tests.
+ bool ParseTest(const string& input, string* err) {
+ return Parse("input", input, err);
+ }
+
+private:
+ /// Parse a file, given its contents as a string.
+ bool Parse(const string& filename, const string& input, string* err);
+
+ /// Parse various statement types.
+ bool ParsePool(string* err);
+ bool ParseRule(string* err);
+ bool ParseLet(string* key, EvalString* val, string* err);
+ bool ParseEdge(string* err);
+ bool ParseDefault(string* err);
+
+ /// Parse either a 'subninja' or 'include' line.
+ bool ParseFileInclude(bool new_scope, string* err);
+
+ /// If the next token is not \a expected, produce an error string
+ /// saying "expectd foo, got bar".
+ bool ExpectToken(Lexer::Token expected, string* err);
+
+ State* state_;
+ BindingEnv* env_;
+ FileReader* file_reader_;
+ Lexer lexer_;
+};
+
+#endif // NINJA_MANIFEST_PARSER_H_
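
A minimal sketch of how the parser is driven. DiskFileReader is an assumed name, modelled on the RealFileReader defined later in ninja.cc; ::ReadFile and Error come from util.h:

    struct DiskFileReader : public ManifestParser::FileReader {
      virtual bool ReadFile(const string& path, string* content, string* err) {
        return ::ReadFile(path, content, err) == 0;  // util.h helper; 0 means success
      }
    };

    // e.g. inside main():
    State state;
    DiskFileReader reader;
    ManifestParser parser(&state, &reader);
    string err;
    if (!parser.Load("build.ninja", &err))
      Error("%s", err.c_str());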
diff --git a/ninja/src/manifest_parser_test.cc b/ninja/src/manifest_parser_test.cc
new file mode 100644
index 00000000000..2638edc488f
--- /dev/null
+++ b/ninja/src/manifest_parser_test.cc
@@ -0,0 +1,740 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "manifest_parser.h"
+
+#include <map>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "graph.h"
+#include "state.h"
+
+struct ParserTest : public testing::Test,
+ public ManifestParser::FileReader {
+ void AssertParse(const char* input) {
+ ManifestParser parser(&state, this);
+ string err;
+ ASSERT_TRUE(parser.ParseTest(input, &err)) << err;
+ ASSERT_EQ("", err);
+ }
+
+ virtual bool ReadFile(const string& path, string* content, string* err) {
+ files_read_.push_back(path);
+ map<string, string>::iterator i = files_.find(path);
+ if (i == files_.end()) {
+ *err = "No such file or directory"; // Match strerror() for ENOENT.
+ return false;
+ }
+ *content = i->second;
+ return true;
+ }
+
+ State state;
+ map<string, string> files_;
+ vector<string> files_read_;
+};
+
+TEST_F(ParserTest, Empty) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(""));
+}
+
+TEST_F(ParserTest, Rules) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"\n"
+"rule date\n"
+" command = date > $out\n"
+"\n"
+"build result: cat in_1.cc in-2.O\n"));
+
+ ASSERT_EQ(3u, state.rules_.size());
+ const Rule* rule = state.rules_.begin()->second;
+ EXPECT_EQ("cat", rule->name());
+ EXPECT_EQ("[cat ][$in][ > ][$out]",
+ rule->GetBinding("command")->Serialize());
+}
+
+TEST_F(ParserTest, RuleAttributes) {
+ // Check that all of the allowed rule attributes are parsed ok.
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = a\n"
+" depfile = a\n"
+" deps = a\n"
+" description = a\n"
+" generator = a\n"
+" restat = a\n"
+" rspfile = a\n"
+" rspfile_content = a\n"
+));
+}
+
+TEST_F(ParserTest, IgnoreIndentedComments) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+" #indented comment\n"
+"rule cat\n"
+" command = cat $in > $out\n"
+" #generator = 1\n"
+" restat = 1 # comment\n"
+" #comment\n"
+"build result: cat in_1.cc in-2.O\n"
+" #comment\n"));
+
+ ASSERT_EQ(2u, state.rules_.size());
+ const Rule* rule = state.rules_.begin()->second;
+ EXPECT_EQ("cat", rule->name());
+ Edge* edge = state.GetNode("result")->in_edge();
+ EXPECT_TRUE(edge->GetBindingBool("restat"));
+ EXPECT_FALSE(edge->GetBindingBool("generator"));
+}
+
+TEST_F(ParserTest, IgnoreIndentedBlankLines) {
+ // the indented blanks used to cause parse errors
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+" \n"
+"rule cat\n"
+" command = cat $in > $out\n"
+" \n"
+"build result: cat in_1.cc in-2.O\n"
+" \n"
+"variable=1\n"));
+
+ // the variable must be in the top level environment
+ EXPECT_EQ("1", state.bindings_.LookupVariable("variable"));
+}
+
+TEST_F(ParserTest, ResponseFiles) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat_rsp\n"
+" command = cat $rspfile > $out\n"
+" rspfile = $rspfile\n"
+" rspfile_content = $in\n"
+"\n"
+"build out: cat_rsp in\n"
+" rspfile=out.rsp\n"));
+
+ ASSERT_EQ(2u, state.rules_.size());
+ const Rule* rule = state.rules_.begin()->second;
+ EXPECT_EQ("cat_rsp", rule->name());
+ EXPECT_EQ("[cat ][$rspfile][ > ][$out]",
+ rule->GetBinding("command")->Serialize());
+ EXPECT_EQ("[$rspfile]", rule->GetBinding("rspfile")->Serialize());
+ EXPECT_EQ("[$in]", rule->GetBinding("rspfile_content")->Serialize());
+}
+
+TEST_F(ParserTest, InNewline) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat_rsp\n"
+" command = cat $in_newline > $out\n"
+"\n"
+"build out: cat_rsp in in2\n"
+" rspfile=out.rsp\n"));
+
+ ASSERT_EQ(2u, state.rules_.size());
+ const Rule* rule = state.rules_.begin()->second;
+ EXPECT_EQ("cat_rsp", rule->name());
+ EXPECT_EQ("[cat ][$in_newline][ > ][$out]",
+ rule->GetBinding("command")->Serialize());
+
+ Edge* edge = state.edges_[0];
+ EXPECT_EQ("cat in\nin2 > out", edge->EvaluateCommand());
+}
+
+TEST_F(ParserTest, Variables) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"l = one-letter-test\n"
+"rule link\n"
+" command = ld $l $extra $with_under -o $out $in\n"
+"\n"
+"extra = -pthread\n"
+"with_under = -under\n"
+"build a: link b c\n"
+"nested1 = 1\n"
+"nested2 = $nested1/2\n"
+"build supernested: link x\n"
+" extra = $nested2/3\n"));
+
+ ASSERT_EQ(2u, state.edges_.size());
+ Edge* edge = state.edges_[0];
+ EXPECT_EQ("ld one-letter-test -pthread -under -o a b c",
+ edge->EvaluateCommand());
+ EXPECT_EQ("1/2", state.bindings_.LookupVariable("nested2"));
+
+ edge = state.edges_[1];
+ EXPECT_EQ("ld one-letter-test 1/2/3 -under -o supernested x",
+ edge->EvaluateCommand());
+}
+
+TEST_F(ParserTest, VariableScope) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"foo = bar\n"
+"rule cmd\n"
+" command = cmd $foo $in $out\n"
+"\n"
+"build inner: cmd a\n"
+" foo = baz\n"
+"build outer: cmd b\n"
+"\n" // Extra newline after build line tickles a regression.
+));
+
+ ASSERT_EQ(2u, state.edges_.size());
+ EXPECT_EQ("cmd baz a inner", state.edges_[0]->EvaluateCommand());
+ EXPECT_EQ("cmd bar b outer", state.edges_[1]->EvaluateCommand());
+}
+
+TEST_F(ParserTest, Continuation) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule link\n"
+" command = foo bar $\n"
+" baz\n"
+"\n"
+"build a: link c $\n"
+" d e f\n"));
+
+ ASSERT_EQ(2u, state.rules_.size());
+ const Rule* rule = state.rules_.begin()->second;
+ EXPECT_EQ("link", rule->name());
+ EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize());
+}
+
+TEST_F(ParserTest, Backslash) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"foo = bar\\baz\n"
+"foo2 = bar\\ baz\n"
+));
+ EXPECT_EQ("bar\\baz", state.bindings_.LookupVariable("foo"));
+ EXPECT_EQ("bar\\ baz", state.bindings_.LookupVariable("foo2"));
+}
+
+TEST_F(ParserTest, Comment) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"# this is a comment\n"
+"foo = not # a comment\n"));
+ EXPECT_EQ("not # a comment", state.bindings_.LookupVariable("foo"));
+}
+
+TEST_F(ParserTest, Dollars) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule foo\n"
+" command = ${out}bar$$baz$$$\n"
+"blah\n"
+"x = $$dollar\n"
+"build $x: foo y\n"
+));
+ EXPECT_EQ("$dollar", state.bindings_.LookupVariable("x"));
+ EXPECT_EQ("$dollarbar$baz$blah", state.edges_[0]->EvaluateCommand());
+}
+
+TEST_F(ParserTest, EscapeSpaces) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule spaces\n"
+" command = something\n"
+"build foo$ bar: spaces $$one two$$$ three\n"
+));
+ EXPECT_TRUE(state.LookupNode("foo bar"));
+ EXPECT_EQ(state.edges_[0]->outputs_[0]->path(), "foo bar");
+ EXPECT_EQ(state.edges_[0]->inputs_[0]->path(), "$one");
+ EXPECT_EQ(state.edges_[0]->inputs_[1]->path(), "two$ three");
+ EXPECT_EQ(state.edges_[0]->EvaluateCommand(), "something");
+}
+
+TEST_F(ParserTest, CanonicalizeFile) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out: cat in/1 in//2\n"
+"build in/1: cat\n"
+"build in/2: cat\n"));
+
+ EXPECT_TRUE(state.LookupNode("in/1"));
+ EXPECT_TRUE(state.LookupNode("in/2"));
+ EXPECT_FALSE(state.LookupNode("in//1"));
+ EXPECT_FALSE(state.LookupNode("in//2"));
+}
+
+TEST_F(ParserTest, PathVariables) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"dir = out\n"
+"build $dir/exe: cat src\n"));
+
+ EXPECT_FALSE(state.LookupNode("$dir/exe"));
+ EXPECT_TRUE(state.LookupNode("out/exe"));
+}
+
+TEST_F(ParserTest, CanonicalizePaths) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build ./out.o: cat ./bar/baz/../foo.cc\n"));
+
+ EXPECT_FALSE(state.LookupNode("./out.o"));
+ EXPECT_TRUE(state.LookupNode("out.o"));
+ EXPECT_FALSE(state.LookupNode("./bar/baz/../foo.cc"));
+ EXPECT_TRUE(state.LookupNode("bar/foo.cc"));
+}
+
+TEST_F(ParserTest, ReservedWords) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule build\n"
+" command = rule run $out\n"
+"build subninja: build include default foo.cc\n"
+"default subninja\n"));
+}
+
+TEST_F(ParserTest, Errors) {
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("foobar", &err));
+ EXPECT_EQ("input:1: expected '=', got eof\n"
+ "foobar\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x 3", &err));
+ EXPECT_EQ("input:1: expected '=', got identifier\n"
+ "x 3\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = 3", &err));
+ EXPECT_EQ("input:1: unexpected EOF\n"
+ "x = 3\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = 3\ny 2", &err));
+ EXPECT_EQ("input:2: expected '=', got identifier\n"
+ "y 2\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = $", &err));
+ EXPECT_EQ("input:1: bad $-escape (literal $ must be written as $$)\n"
+ "x = $\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = $\n $[\n", &err));
+ EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n"
+ " $[\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("x = a$\n b$\n $\n", &err));
+ EXPECT_EQ("input:4: unexpected EOF\n"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("build x: y z\n", &err));
+ EXPECT_EQ("input:1: unknown build rule 'y'\n"
+ "build x: y z\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("build x:: y z\n", &err));
+ EXPECT_EQ("input:1: expected build command name\n"
+ "build x:: y z\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n command = cat ok\n"
+ "build x: cat $\n :\n",
+ &err));
+ EXPECT_EQ("input:4: expected newline, got ':'\n"
+ " :\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n",
+ &err));
+ EXPECT_EQ("input:2: expected 'command =' line\n", err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = ${fafsd\n"
+ "foo = bar\n",
+ &err));
+ EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n"
+ " command = ${fafsd\n"
+ " ^ near here"
+ , err);
+ }
+
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = cat\n"
+ "build $.: cat foo\n",
+ &err));
+ EXPECT_EQ("input:3: bad $-escape (literal $ must be written as $$)\n"
+ "build $.: cat foo\n"
+ " ^ near here"
+ , err);
+ }
+
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ " command = cat\n"
+ "build $: cat foo\n",
+ &err));
+ EXPECT_EQ("input:3: expected ':', got newline ($ also escapes ':')\n"
+ "build $: cat foo\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule %foo\n",
+ &err));
+ EXPECT_EQ("input:1: expected rule name\n", err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n"
+ " command = foo\n"
+ " othervar = bar\n",
+ &err));
+ EXPECT_EQ("input:3: unexpected variable 'othervar'\n"
+ " othervar = bar\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n"
+ "build $.: cc bar.cc\n",
+ &err));
+ EXPECT_EQ("input:3: bad $-escape (literal $ must be written as $$)\n"
+ "build $.: cc bar.cc\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n"
+ "build $: cc bar.cc\n",
+ &err));
+ EXPECT_EQ("input:3: expected ':', got newline ($ also escapes ':')\n"
+ "build $: cc bar.cc\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("default\n",
+ &err));
+ EXPECT_EQ("input:1: expected target name\n"
+ "default\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("default nonexistent\n",
+ &err));
+ EXPECT_EQ("input:1: unknown target 'nonexistent'\n"
+ "default nonexistent\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule r\n command = r\n"
+ "build b: r\n"
+ "default b:\n",
+ &err));
+ EXPECT_EQ("input:4: expected newline, got ':'\n"
+ "default b:\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("default $a\n", &err));
+ EXPECT_EQ("input:1: empty path\n"
+ "default $a\n"
+ " ^ near here"
+ , err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule r\n"
+ " command = r\n"
+ "build $a: r $c\n", &err));
+ // XXX the line number is wrong; we should evaluate paths in ParseEdge
+ // as we see them, not after we've read them all!
+ EXPECT_EQ("input:4: empty path\n", err);
+ }
+
+ {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ // the indented blank line must terminate the rule
+ // this also verifies that "unexpected (token)" errors are correct
+ EXPECT_FALSE(parser.ParseTest("rule r\n"
+ " command = r\n"
+ " \n"
+ " generator = 1\n", &err));
+ EXPECT_EQ("input:4: unexpected indent\n", err);
+ }
+}
+
+TEST_F(ParserTest, MissingInput) {
+ State state;
+ ManifestParser parser(&state, this);
+ string err;
+ EXPECT_FALSE(parser.Load("build.ninja", &err));
+ EXPECT_EQ("loading 'build.ninja': No such file or directory", err);
+}
+
+TEST_F(ParserTest, MultipleOutputs) {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_TRUE(parser.ParseTest("rule cc\n command = foo\n depfile = bar\n"
+ "build a.o b.o: cc c.cc\n",
+ &err));
+ EXPECT_EQ("", err);
+}
+
+TEST_F(ParserTest, MultipleOutputsWithDeps) {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n deps = gcc\n"
+ "build a.o b.o: cc c.cc\n",
+ &err));
+ EXPECT_EQ("input:5: multiple outputs aren't (yet?) supported by depslog; "
+ "bring this up on the mailing list if it affects you\n", err);
+}
+
+TEST_F(ParserTest, SubNinja) {
+ files_["test.ninja"] =
+ "var = inner\n"
+ "build $builddir/inner: varref\n";
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"builddir = some_dir/\n"
+"rule varref\n"
+" command = varref $var\n"
+"var = outer\n"
+"build $builddir/outer: varref\n"
+"subninja test.ninja\n"
+"build $builddir/outer2: varref\n"));
+ ASSERT_EQ(1u, files_read_.size());
+
+ EXPECT_EQ("test.ninja", files_read_[0]);
+ EXPECT_TRUE(state.LookupNode("some_dir/outer"));
+ // Verify our builddir setting is inherited.
+ EXPECT_TRUE(state.LookupNode("some_dir/inner"));
+
+ ASSERT_EQ(3u, state.edges_.size());
+ EXPECT_EQ("varref outer", state.edges_[0]->EvaluateCommand());
+ EXPECT_EQ("varref inner", state.edges_[1]->EvaluateCommand());
+ EXPECT_EQ("varref outer", state.edges_[2]->EvaluateCommand());
+}
+
+TEST_F(ParserTest, MissingSubNinja) {
+ ManifestParser parser(&state, this);
+ string err;
+ EXPECT_FALSE(parser.ParseTest("subninja foo.ninja\n", &err));
+ EXPECT_EQ("input:1: loading 'foo.ninja': No such file or directory\n"
+ "subninja foo.ninja\n"
+ " ^ near here"
+ , err);
+}
+
+TEST_F(ParserTest, Include) {
+ files_["include.ninja"] = "var = inner\n";
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"var = outer\n"
+"include include.ninja\n"));
+
+ ASSERT_EQ(1u, files_read_.size());
+ EXPECT_EQ("include.ninja", files_read_[0]);
+ EXPECT_EQ("inner", state.bindings_.LookupVariable("var"));
+}
+
+TEST_F(ParserTest, Implicit) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build foo: cat bar | baz\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_TRUE(edge->is_implicit(1));
+}
+
+TEST_F(ParserTest, OrderOnly) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build foo: cat bar || baz\n"));
+
+ Edge* edge = state.LookupNode("foo")->in_edge();
+ ASSERT_TRUE(edge->is_order_only(1));
+}
+
+TEST_F(ParserTest, DefaultDefault) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build a: cat foo\n"
+"build b: cat foo\n"
+"build c: cat foo\n"
+"build d: cat foo\n"));
+
+ string err;
+ EXPECT_EQ(4u, state.DefaultNodes(&err).size());
+ EXPECT_EQ("", err);
+}
+
+TEST_F(ParserTest, DefaultStatements) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build a: cat foo\n"
+"build b: cat foo\n"
+"build c: cat foo\n"
+"build d: cat foo\n"
+"third = c\n"
+"default a b\n"
+"default $third\n"));
+
+ string err;
+ vector<Node*> nodes = state.DefaultNodes(&err);
+ EXPECT_EQ("", err);
+ ASSERT_EQ(3u, nodes.size());
+ EXPECT_EQ("a", nodes[0]->path());
+ EXPECT_EQ("b", nodes[1]->path());
+ EXPECT_EQ("c", nodes[2]->path());
+}
+
+TEST_F(ParserTest, UTF8) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule utf8\n"
+" command = true\n"
+" description = compilaci\xC3\xB3\n"));
+}
+
+// We might want to eventually allow CRLF to be nice to Windows developers,
+// but for now just verify we error out with a nice message.
+TEST_F(ParserTest, CRLF) {
+ State state;
+ ManifestParser parser(&state, NULL);
+ string err;
+
+ EXPECT_FALSE(parser.ParseTest("# comment with crlf\r\n",
+ &err));
+ EXPECT_EQ("input:1: lexing error\n",
+ err);
+
+ EXPECT_FALSE(parser.ParseTest("foo = foo\nbar = bar\r\n",
+ &err));
+ EXPECT_EQ("input:2: carriage returns are not allowed, use newlines\n"
+ "bar = bar\r\n"
+ " ^ near here",
+ err);
+}
diff --git a/ninja/src/metrics.cc b/ninja/src/metrics.cc
new file mode 100644
index 00000000000..ca4f97a350b
--- /dev/null
+++ b/ninja/src/metrics.cc
@@ -0,0 +1,125 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "metrics.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifndef _WIN32
+#include <sys/time.h>
+#else
+#include <windows.h>
+#endif
+
+#include "util.h"
+
+Metrics* g_metrics = NULL;
+
+namespace {
+
+#ifndef _WIN32
+/// Compute a platform-specific high-res timer value that fits into an int64.
+int64_t HighResTimer() {
+ timeval tv;
+ if (gettimeofday(&tv, NULL) < 0)
+ Fatal("gettimeofday: %s", strerror(errno));
+ return (int64_t)tv.tv_sec * 1000*1000 + tv.tv_usec;
+}
+
+/// Convert a delta of HighResTimer() values to microseconds.
+int64_t TimerToMicros(int64_t dt) {
+ // No conversion necessary.
+ return dt;
+}
+#else
+int64_t LargeIntegerToInt64(const LARGE_INTEGER& i) {
+ return ((int64_t)i.HighPart) << 32 | i.LowPart;
+}
+
+int64_t HighResTimer() {
+ LARGE_INTEGER counter;
+ if (!QueryPerformanceCounter(&counter))
+ Fatal("QueryPerformanceCounter: %s", GetLastErrorString().c_str());
+ return LargeIntegerToInt64(counter);
+}
+
+int64_t TimerToMicros(int64_t dt) {
+ static int64_t ticks_per_sec = 0;
+ if (!ticks_per_sec) {
+ LARGE_INTEGER freq;
+ if (!QueryPerformanceFrequency(&freq))
+ Fatal("QueryPerformanceFrequency: %s", GetLastErrorString().c_str());
+ ticks_per_sec = LargeIntegerToInt64(freq);
+ }
+
+ // dt is in ticks. We want microseconds.
+ return (dt * 1000000) / ticks_per_sec;
+}
+#endif
+
+} // anonymous namespace
+
+
+ScopedMetric::ScopedMetric(Metric* metric) {
+ metric_ = metric;
+ if (!metric_)
+ return;
+ start_ = HighResTimer();
+}
+
+ScopedMetric::~ScopedMetric() {
+ if (!metric_)
+ return;
+ metric_->count++;
+ int64_t dt = TimerToMicros(HighResTimer() - start_);
+ metric_->sum += dt;
+}
+
+Metric* Metrics::NewMetric(const string& name) {
+ Metric* metric = new Metric;
+ metric->name = name;
+ metric->count = 0;
+ metric->sum = 0;
+ metrics_.push_back(metric);
+ return metric;
+}
+
+void Metrics::Report() {
+ int width = 0;
+ for (vector<Metric*>::iterator i = metrics_.begin();
+ i != metrics_.end(); ++i) {
+ width = max((int)(*i)->name.size(), width);
+ }
+
+ printf("%-*s\t%-6s\t%-9s\t%s\n", width,
+ "metric", "count", "avg (us)", "total (ms)");
+ for (vector<Metric*>::iterator i = metrics_.begin();
+ i != metrics_.end(); ++i) {
+ Metric* metric = *i;
+ double total = metric->sum / (double)1000;
+ double avg = metric->sum / (double)metric->count;
+ printf("%-*s\t%-6d\t%-8.1f\t%.1f\n", width, metric->name.c_str(),
+ metric->count, avg, total);
+ }
+}
+
+uint64_t Stopwatch::Now() const {
+ return TimerToMicros(HighResTimer());
+}
+
+int64_t GetTimeMillis() {
+ return TimerToMicros(HighResTimer()) / 1000;
+}
+
diff --git a/ninja/src/metrics.h b/ninja/src/metrics.h
new file mode 100644
index 00000000000..b6da859db2b
--- /dev/null
+++ b/ninja/src/metrics.h
@@ -0,0 +1,92 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_METRICS_H_
+#define NINJA_METRICS_H_
+
+#include <string>
+#include <vector>
+using namespace std;
+
+#include "util.h" // For int64_t.
+
+/// The Metrics module is used for the debug mode that dumps timing stats of
+/// various actions. To use, see METRIC_RECORD below.
+
+/// A single metric we're tracking, like "depfile load time".
+struct Metric {
+ string name;
+ /// Number of times we've hit the code path.
+ int count;
+ /// Total time (in micros) we've spent on the code path.
+ int64_t sum;
+};
+
+
+/// A scoped object for recording a metric across the body of a function.
+/// Used by the METRIC_RECORD macro.
+struct ScopedMetric {
+ explicit ScopedMetric(Metric* metric);
+ ~ScopedMetric();
+
+private:
+ Metric* metric_;
+ /// Timestamp when the measurement started.
+ /// Value is platform-dependent.
+ int64_t start_;
+};
+
+/// The singleton that stores metrics and prints the report.
+struct Metrics {
+ Metric* NewMetric(const string& name);
+
+ /// Print a summary report to stdout.
+ void Report();
+
+private:
+ vector<Metric*> metrics_;
+};
+
+/// Get the current time relative to some epoch.
+/// Epoch varies between platforms; only useful for measuring elapsed time.
+int64_t GetTimeMillis();
+
+/// A simple stopwatch which returns the time
+/// in seconds since Restart() was called.
+struct Stopwatch {
+ public:
+ Stopwatch() : started_(0) {}
+
+ /// Seconds since Restart() call.
+ double Elapsed() const {
+ return 1e-6 * static_cast<double>(Now() - started_);
+ }
+
+ void Restart() { started_ = Now(); }
+
+ private:
+ uint64_t started_;
+ uint64_t Now() const;
+};
+
+/// The primary interface to metrics. Use METRIC_RECORD("foobar") at the top
+/// of a function to get timing stats recorded for each call of the function.
+#define METRIC_RECORD(name) \
+ static Metric* metrics_h_metric = \
+ g_metrics ? g_metrics->NewMetric(name) : NULL; \
+ ScopedMetric metrics_h_scoped(metrics_h_metric);
+
+extern Metrics* g_metrics;
+
+#endif // NINJA_METRICS_H_
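
A small usage sketch for the metrics module. ParseDepfile is a hypothetical function name; a real call site is the METRIC_RECORD(".ninja parse") line in manifest_parser.cc above:

    void ParseDepfile() {
      // Creates one static Metric named "depfile parse" and, via the
      // ScopedMetric destructor, adds this call's elapsed time to it.
      // When g_metrics is NULL (stats mode off) this is a near no-op.
      METRIC_RECORD("depfile parse");
      // ... the work being timed ...
    }

    // Enabling and reporting, roughly what ninja's '-d stats' mode does:
    //   g_metrics = new Metrics;
    //   ... run the build ...
    //   g_metrics->Report();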
diff --git a/ninja/src/minidump-win32.cc b/ninja/src/minidump-win32.cc
new file mode 100644
index 00000000000..c79ec0e9cb2
--- /dev/null
+++ b/ninja/src/minidump-win32.cc
@@ -0,0 +1,88 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_BOOTSTRAP
+
+#include <windows.h>
+#include <DbgHelp.h>
+
+
+#include "util.h"
+
+typedef BOOL (WINAPI *MiniDumpWriteDumpFunc) (
+ IN HANDLE,
+ IN DWORD,
+ IN HANDLE,
+ IN MINIDUMP_TYPE,
+ IN CONST PMINIDUMP_EXCEPTION_INFORMATION, OPTIONAL
+ IN CONST PMINIDUMP_USER_STREAM_INFORMATION, OPTIONAL
+ IN CONST PMINIDUMP_CALLBACK_INFORMATION OPTIONAL
+ );
+
+/// Creates a Windows minidump in the temp folder.
+void CreateWin32MiniDump(_EXCEPTION_POINTERS* pep) {
+ char temp_path[MAX_PATH];
+ GetTempPath(sizeof(temp_path), temp_path);
+ char temp_file[MAX_PATH];
+ sprintf(temp_file, "%s\\ninja_crash_dump_%d.dmp",
+ temp_path, GetCurrentProcessId());
+
+ // Delete any previous minidump of the same name.
+ DeleteFile(temp_file);
+
+ // Load DbgHelp.dll dynamically, as the library is not present on all
+ // Windows versions.
+ HMODULE dbghelp = LoadLibrary("dbghelp.dll");
+ if (dbghelp == NULL) {
+ Error("failed to create minidump: LoadLibrary('dbghelp.dll'): %s",
+ GetLastErrorString().c_str());
+ return;
+ }
+
+ MiniDumpWriteDumpFunc mini_dump_write_dump =
+ (MiniDumpWriteDumpFunc)GetProcAddress(dbghelp, "MiniDumpWriteDump");
+ if (mini_dump_write_dump == NULL) {
+ Error("failed to create minidump: GetProcAddress('MiniDumpWriteDump'): %s",
+ GetLastErrorString().c_str());
+ return;
+ }
+
+ HANDLE hFile = CreateFileA(temp_file, GENERIC_READ | GENERIC_WRITE, 0, NULL,
+ CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (hFile == INVALID_HANDLE_VALUE) {
+ Error("failed to create minidump: CreateFileA(%s): %s",
+ temp_file, GetLastErrorString().c_str());
+ return;
+ }
+
+ MINIDUMP_EXCEPTION_INFORMATION mdei;
+ mdei.ThreadId = GetCurrentThreadId();
+ mdei.ExceptionPointers = pep;
+ mdei.ClientPointers = FALSE;
+ MINIDUMP_TYPE mdt = (MINIDUMP_TYPE) (MiniDumpWithDataSegs |
+ MiniDumpWithHandleData);
+
+ BOOL rv = mini_dump_write_dump(GetCurrentProcess(), GetCurrentProcessId(),
+ hFile, mdt, (pep != 0) ? &mdei : 0, 0, 0);
+ CloseHandle(hFile);
+
+ if (!rv) {
+ Error("MiniDumpWriteDump failed: %s", GetLastErrorString().c_str());
+ return;
+ }
+
+ Warning("minidump created: %s", temp_file);
+}
+
+#endif // NINJA_BOOTSTRAP
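
CreateWin32MiniDump is only meaningful when called from an exception filter. A hypothetical wiring using structured exception handling could look like the following (GuardedMain and RealMain are assumed names; the actual hookup lives in ninja.cc and may differ):

    LONG DumpingFilter(unsigned int code, _EXCEPTION_POINTERS* info) {
      CreateWin32MiniDump(info);
      return EXCEPTION_EXECUTE_HANDLER;
    }

    int GuardedMain(int argc, char** argv) {
      __try {
        return RealMain(argc, argv);  // hypothetical real entry point
      } __except (DumpingFilter(GetExceptionCode(), GetExceptionInformation())) {
        fprintf(stderr, "ninja: fatal exception\n");
        return 2;
      }
    }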
diff --git a/ninja/src/msvc_helper-win32.cc b/ninja/src/msvc_helper-win32.cc
new file mode 100644
index 00000000000..7c45029ccea
--- /dev/null
+++ b/ninja/src/msvc_helper-win32.cc
@@ -0,0 +1,185 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "msvc_helper.h"
+
+#include <algorithm>
+#include <stdio.h>
+#include <string.h>
+#include <windows.h>
+
+#include "includes_normalize.h"
+#include "util.h"
+
+namespace {
+
+/// Return true if \a input ends with \a needle.
+bool EndsWith(const string& input, const string& needle) {
+ return (input.size() >= needle.size() &&
+ input.substr(input.size() - needle.size()) == needle);
+}
+
+string Replace(const string& input, const string& find, const string& replace) {
+ string result = input;
+ size_t start_pos = 0;
+ while ((start_pos = result.find(find, start_pos)) != string::npos) {
+ result.replace(start_pos, find.length(), replace);
+ start_pos += replace.length();
+ }
+ return result;
+}
+
+} // anonymous namespace
+
+string EscapeForDepfile(const string& path) {
+ // Depfiles don't escape single \.
+ return Replace(path, " ", "\\ ");
+}
+
+// static
+string CLParser::FilterShowIncludes(const string& line) {
+ static const char kMagicPrefix[] = "Note: including file: ";
+ const char* in = line.c_str();
+ const char* end = in + line.size();
+
+ if (end - in > (int)sizeof(kMagicPrefix) - 1 &&
+ memcmp(in, kMagicPrefix, sizeof(kMagicPrefix) - 1) == 0) {
+ in += sizeof(kMagicPrefix) - 1;
+ while (*in == ' ')
+ ++in;
+ return line.substr(in - line.c_str());
+ }
+ return "";
+}
+
+// static
+bool CLParser::IsSystemInclude(string path) {
+ transform(path.begin(), path.end(), path.begin(), ::tolower);
+ // TODO: this is a heuristic, perhaps there's a better way?
+ return (path.find("program files") != string::npos ||
+ path.find("microsoft visual studio") != string::npos);
+}
+
+// static
+bool CLParser::FilterInputFilename(string line) {
+ transform(line.begin(), line.end(), line.begin(), ::tolower);
+ // TODO: other extensions, like .asm?
+ return EndsWith(line, ".c") ||
+ EndsWith(line, ".cc") ||
+ EndsWith(line, ".cxx") ||
+ EndsWith(line, ".cpp");
+}
+
+string CLParser::Parse(const string& output) {
+ string filtered_output;
+
+ // Loop over all lines in the output to process them.
+ size_t start = 0;
+ while (start < output.size()) {
+ size_t end = output.find_first_of("\r\n", start);
+ if (end == string::npos)
+ end = output.size();
+ string line = output.substr(start, end - start);
+
+ string include = FilterShowIncludes(line);
+ if (!include.empty()) {
+ include = IncludesNormalize::Normalize(include, NULL);
+ if (!IsSystemInclude(include))
+ includes_.insert(include);
+ } else if (FilterInputFilename(line)) {
+ // Drop it.
+ // TODO: if we support compiling multiple output files in a single
+ // cl.exe invocation, we should stash the filename.
+ } else {
+ filtered_output.append(line);
+ filtered_output.append("\n");
+ }
+
+ if (end < output.size() && output[end] == '\r')
+ ++end;
+ if (end < output.size() && output[end] == '\n')
+ ++end;
+ start = end;
+ }
+
+ return filtered_output;
+}
+
+int CLWrapper::Run(const string& command, string* output) {
+ SECURITY_ATTRIBUTES security_attributes = {};
+ security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+ security_attributes.bInheritHandle = TRUE;
+
+ // Must be inheritable so subprocesses can dup to children.
+ HANDLE nul = CreateFile("NUL", GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE |
+ FILE_SHARE_DELETE,
+ &security_attributes, OPEN_EXISTING, 0, NULL);
+ if (nul == INVALID_HANDLE_VALUE)
+ Fatal("couldn't open nul");
+
+ HANDLE stdout_read, stdout_write;
+ if (!CreatePipe(&stdout_read, &stdout_write, &security_attributes, 0))
+ Win32Fatal("CreatePipe");
+
+ if (!SetHandleInformation(stdout_read, HANDLE_FLAG_INHERIT, 0))
+ Win32Fatal("SetHandleInformation");
+
+ PROCESS_INFORMATION process_info = {};
+ STARTUPINFO startup_info = {};
+ startup_info.cb = sizeof(STARTUPINFO);
+ startup_info.hStdInput = nul;
+ startup_info.hStdError = stdout_write;
+ startup_info.hStdOutput = stdout_write;
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+
+ if (!CreateProcessA(NULL, (char*)command.c_str(), NULL, NULL,
+ /* inherit handles */ TRUE, 0,
+ env_block_, NULL,
+ &startup_info, &process_info)) {
+ Win32Fatal("CreateProcess");
+ }
+
+ if (!CloseHandle(nul) ||
+ !CloseHandle(stdout_write)) {
+ Win32Fatal("CloseHandle");
+ }
+
+ // Read all output of the subprocess.
+ DWORD read_len = 1;
+ while (read_len) {
+ char buf[64 << 10];
+ read_len = 0;
+ if (!::ReadFile(stdout_read, buf, sizeof(buf), &read_len, NULL) &&
+ GetLastError() != ERROR_BROKEN_PIPE) {
+ Win32Fatal("ReadFile");
+ }
+ output->append(buf, read_len);
+ }
+
+ // Wait for it to exit and grab its exit code.
+ if (WaitForSingleObject(process_info.hProcess, INFINITE) == WAIT_FAILED)
+ Win32Fatal("WaitForSingleObject");
+ DWORD exit_code = 0;
+ if (!GetExitCodeProcess(process_info.hProcess, &exit_code))
+ Win32Fatal("GetExitCodeProcess");
+
+ if (!CloseHandle(stdout_read) ||
+ !CloseHandle(process_info.hProcess) ||
+ !CloseHandle(process_info.hThread)) {
+ Win32Fatal("CloseHandle");
+ }
+
+ return exit_code;
+}
diff --git a/ninja/src/msvc_helper.h b/ninja/src/msvc_helper.h
new file mode 100644
index 00000000000..e207485bfcd
--- /dev/null
+++ b/ninja/src/msvc_helper.h
@@ -0,0 +1,62 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+#include <set>
+#include <vector>
+using namespace std;
+
+string EscapeForDepfile(const string& path);
+
+/// Visual Studio's cl.exe requires some massaging to work with Ninja;
+/// for example, it emits include information on stderr in a funny
+/// format when building with /showIncludes. This class parses this
+/// output.
+struct CLParser {
+ /// Parse a line of cl.exe output and extract /showIncludes info.
+ /// If a dependency is extracted, returns a nonempty string.
+ /// Exposed for testing.
+ static string FilterShowIncludes(const string& line);
+
+ /// Return true if a mentioned include file is a system path.
+ /// Filtering these out reduces dependency information considerably.
+ static bool IsSystemInclude(string path);
+
+ /// Parse a line of cl.exe output and return true if it looks like
+ /// it's printing an input filename. This is a heuristic but it appears
+ /// to be the best we can do.
+ /// Exposed for testing.
+ static bool FilterInputFilename(string line);
+
+ /// Parse the full output of cl, returning the output (if any) that
+ /// should be printed.
+ string Parse(const string& output);
+
+ set<string> includes_;
+};
+
+/// Wraps a synchronous execution of a CL subprocess.
+struct CLWrapper {
+ CLWrapper() : env_block_(NULL) {}
+
+ /// Set the environment block (as suitable for CreateProcess) to be used
+ /// by Run().
+ void SetEnvBlock(void* env_block) { env_block_ = env_block; }
+
+ /// Start a process and gather its raw output. Returns its exit code.
+ /// Crashes (calls Fatal()) on error.
+ int Run(const string& command, string* output);
+
+ void* env_block_;
+};
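
A sketch of how CLParser and EscapeForDepfile fit together, roughly what the -t msvc tool in the next file does (raw_output and depfile are illustrative variables):

    CLParser parser;
    // Strip the /showIncludes chatter and the echoed input filename;
    // everything else is passed through for the user to see.
    string filtered = parser.Parse(raw_output);
    printf("%s", filtered.c_str());
    // The collected headers become depfile entries.
    for (set<string>::const_iterator i = parser.includes_.begin();
         i != parser.includes_.end(); ++i)
      fprintf(depfile, "%s\n", EscapeForDepfile(*i).c_str());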
diff --git a/ninja/src/msvc_helper_main-win32.cc b/ninja/src/msvc_helper_main-win32.cc
new file mode 100644
index 00000000000..8a0479c6497
--- /dev/null
+++ b/ninja/src/msvc_helper_main-win32.cc
@@ -0,0 +1,135 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "msvc_helper.h"
+
+#include <fcntl.h>
+#include <io.h>
+#include <stdio.h>
+#include <windows.h>
+
+#include "util.h"
+
+#include "getopt.h"
+
+namespace {
+
+void Usage() {
+ printf(
+"usage: ninja -t msvc [options] -- cl.exe /showIncludes /otherArgs\n"
+"options:\n"
+" -e ENVFILE load environment block from ENVFILE as environment\n"
+" -o FILE write output dependency information to FILE.d\n"
+ );
+}
+
+void PushPathIntoEnvironment(const string& env_block) {
+ const char* as_str = env_block.c_str();
+ while (as_str[0]) {
+ if (_strnicmp(as_str, "path=", 5) == 0) {
+ _putenv(as_str);
+ return;
+ } else {
+ as_str = &as_str[strlen(as_str) + 1];
+ }
+ }
+}
+
+void WriteDepFileOrDie(const char* object_path, const CLParser& parse) {
+ string depfile_path = string(object_path) + ".d";
+ FILE* depfile = fopen(depfile_path.c_str(), "w");
+ if (!depfile) {
+ unlink(object_path);
+ Fatal("opening %s: %s", depfile_path.c_str(),
+ GetLastErrorString().c_str());
+ }
+ if (fprintf(depfile, "%s: ", object_path) < 0) {
+ unlink(object_path);
+ fclose(depfile);
+ unlink(depfile_path.c_str());
+ Fatal("writing %s", depfile_path.c_str());
+ }
+ const set<string>& headers = parse.includes_;
+ for (set<string>::const_iterator i = headers.begin();
+ i != headers.end(); ++i) {
+ if (fprintf(depfile, "%s\n", EscapeForDepfile(*i).c_str()) < 0) {
+ unlink(object_path);
+ fclose(depfile);
+ unlink(depfile_path.c_str());
+ Fatal("writing %s", depfile_path.c_str());
+ }
+ }
+ fclose(depfile);
+}
+
+} // anonymous namespace
+
+int MSVCHelperMain(int argc, char** argv) {
+ const char* output_filename = NULL;
+ const char* envfile = NULL;
+
+ const option kLongOptions[] = {
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+ int opt;
+ while ((opt = getopt_long(argc, argv, "e:o:h", kLongOptions, NULL)) != -1) {
+ switch (opt) {
+ case 'e':
+ envfile = optarg;
+ break;
+ case 'o':
+ output_filename = optarg;
+ break;
+ case 'h':
+ default:
+ Usage();
+ return 0;
+ }
+ }
+
+ string env;
+ if (envfile) {
+ string err;
+ if (ReadFile(envfile, &env, &err) != 0)
+ Fatal("couldn't open %s: %s", envfile, err.c_str());
+ PushPathIntoEnvironment(env);
+ }
+
+ char* command = GetCommandLine();
+ command = strstr(command, " -- ");
+ if (!command) {
+ Fatal("expected command line to end with \" -- command args\"");
+ }
+ command += 4;
+
+ CLWrapper cl;
+ if (!env.empty())
+ cl.SetEnvBlock((void*)env.data());
+ string output;
+ int exit_code = cl.Run(command, &output);
+
+ if (output_filename) {
+ CLParser parser;
+ output = parser.Parse(output);
+ WriteDepFileOrDie(output_filename, parser);
+ }
+
+ // CLWrapper's output already has \r\n line endings; make sure the C runtime
+ // doesn't expand this to \r\r\n.
+ _setmode(_fileno(stdout), _O_BINARY);
+ printf("%s", output.c_str());
+
+ return exit_code;
+}
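
As the usage text above indicates, a build rule would typically invoke this helper as something like "ninja -t msvc -o obj/foo.obj -- cl.exe /showIncludes /c foo.cc ..." (the paths are illustrative): it runs cl.exe, filters the /showIncludes lines out of the captured output, and writes the discovered headers to obj/foo.obj.d.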
diff --git a/ninja/src/msvc_helper_test.cc b/ninja/src/msvc_helper_test.cc
new file mode 100644
index 00000000000..02f2863426f
--- /dev/null
+++ b/ninja/src/msvc_helper_test.cc
@@ -0,0 +1,112 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "msvc_helper.h"
+
+#include <gtest/gtest.h>
+
+#include "test.h"
+#include "util.h"
+
+TEST(CLParserTest, ShowIncludes) {
+ ASSERT_EQ("", CLParser::FilterShowIncludes(""));
+
+ ASSERT_EQ("", CLParser::FilterShowIncludes("Sample compiler output"));
+ ASSERT_EQ("c:\\Some Files\\foobar.h",
+ CLParser::FilterShowIncludes("Note: including file: "
+ "c:\\Some Files\\foobar.h"));
+ ASSERT_EQ("c:\\initspaces.h",
+ CLParser::FilterShowIncludes("Note: including file: "
+ "c:\\initspaces.h"));
+}
+
+TEST(CLParserTest, FilterInputFilename) {
+ ASSERT_TRUE(CLParser::FilterInputFilename("foobar.cc"));
+ ASSERT_TRUE(CLParser::FilterInputFilename("foo bar.cc"));
+ ASSERT_TRUE(CLParser::FilterInputFilename("baz.c"));
+ ASSERT_TRUE(CLParser::FilterInputFilename("FOOBAR.CC"));
+
+ ASSERT_FALSE(CLParser::FilterInputFilename(
+ "src\\cl_helper.cc(166) : fatal error C1075: end "
+ "of file found ..."));
+}
+
+TEST(CLParserTest, ParseSimple) {
+ CLParser parser;
+ string output = parser.Parse(
+ "foo\r\n"
+ "Note: including file: foo.h\r\n"
+ "bar\r\n");
+
+ ASSERT_EQ("foo\nbar\n", output);
+ ASSERT_EQ(1u, parser.includes_.size());
+ ASSERT_EQ("foo.h", *parser.includes_.begin());
+}
+
+TEST(CLParserTest, ParseFilenameFilter) {
+ CLParser parser;
+ string output = parser.Parse(
+ "foo.cc\r\n"
+ "cl: warning\r\n");
+ ASSERT_EQ("cl: warning\n", output);
+}
+
+TEST(CLParserTest, ParseSystemInclude) {
+ CLParser parser;
+ string output = parser.Parse(
+ "Note: including file: c:\\Program Files\\foo.h\r\n"
+ "Note: including file: d:\\Microsoft Visual Studio\\bar.h\r\n"
+ "Note: including file: path.h\r\n");
+ // We should have dropped the first two includes because they look like
+ // system headers.
+ ASSERT_EQ("", output);
+ ASSERT_EQ(1u, parser.includes_.size());
+ ASSERT_EQ("path.h", *parser.includes_.begin());
+}
+
+TEST(CLParserTest, DuplicatedHeader) {
+ CLParser parser;
+ string output = parser.Parse(
+ "Note: including file: foo.h\r\n"
+ "Note: including file: bar.h\r\n"
+ "Note: including file: foo.h\r\n");
+ // We should have dropped one copy of foo.h.
+ ASSERT_EQ("", output);
+ ASSERT_EQ(2u, parser.includes_.size());
+}
+
+TEST(CLParserTest, DuplicatedHeaderPathConverted) {
+ CLParser parser;
+ string output = parser.Parse(
+ "Note: including file: sub/foo.h\r\n"
+ "Note: including file: bar.h\r\n"
+ "Note: including file: sub\\foo.h\r\n");
+ // We should have dropped one copy of foo.h.
+ ASSERT_EQ("", output);
+ ASSERT_EQ(2u, parser.includes_.size());
+}
+
+TEST(CLParserTest, SpacesInFilename) {
+ ASSERT_EQ("sub\\some\\ sdk\\foo.h",
+ EscapeForDepfile("sub\\some sdk\\foo.h"));
+}
+
+TEST(MSVCHelperTest, EnvBlock) {
+ char env_block[] = "foo=bar\0";
+ CLWrapper cl;
+ cl.SetEnvBlock(env_block);
+ string output;
+ cl.Run("cmd /c \"echo foo is %foo%", &output);
+ ASSERT_EQ("foo is bar\r\n", output);
+}
diff --git a/ninja/src/ninja.cc b/ninja/src/ninja.cc
new file mode 100644
index 00000000000..3b381b762d7
--- /dev/null
+++ b/ninja/src/ninja.cc
@@ -0,0 +1,1030 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _WIN32
+#include "getopt.h"
+#include <direct.h>
+#include <windows.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+#include "browse.h"
+#include "build.h"
+#include "build_log.h"
+#include "deps_log.h"
+#include "clean.h"
+#include "disk_interface.h"
+#include "explain.h"
+#include "graph.h"
+#include "graphviz.h"
+#include "manifest_parser.h"
+#include "metrics.h"
+#include "state.h"
+#include "util.h"
+#include "version.h"
+
+#ifdef _MSC_VER
+// Defined in msvc_helper_main-win32.cc.
+int MSVCHelperMain(int argc, char** argv);
+
+// Defined in minidump-win32.cc.
+void CreateWin32MiniDump(_EXCEPTION_POINTERS* pep);
+#endif
+
+namespace {
+
+struct Tool;
+
+/// Command-line options.
+struct Options {
+ /// Build file to load.
+ const char* input_file;
+
+ /// Directory to change into before running.
+ const char* working_dir;
+
+ /// Tool to run rather than building.
+ const Tool* tool;
+};
+
+/// The Ninja main() loads up a series of data structures; various tools need
+/// to poke into these, so store them as fields on an object.
+struct NinjaMain {
+ NinjaMain(const char* ninja_command, const BuildConfig& config) :
+ ninja_command_(ninja_command), config_(config) {}
+
+ /// Command line used to run Ninja.
+ const char* ninja_command_;
+
+ /// Build configuration set from flags (e.g. parallelism).
+ const BuildConfig& config_;
+
+ /// Loaded state (rules, nodes).
+ State state_;
+
+ /// Functions for accessing the disk.
+ RealDiskInterface disk_interface_;
+
+ /// The build directory, used for storing the build log etc.
+ string build_dir_;
+
+ BuildLog build_log_;
+ DepsLog deps_log_;
+
+ /// The type of functions that are the entry points to tools (subcommands).
+ typedef int (NinjaMain::*ToolFunc)(int, char**);
+
+ /// Get the Node for a given command-line path, handling features like
+ /// spell correction.
+ Node* CollectTarget(const char* cpath, string* err);
+
+ /// CollectTarget for all command-line arguments, filling in \a targets.
+ bool CollectTargetsFromArgs(int argc, char* argv[],
+ vector<Node*>* targets, string* err);
+
+ // The various subcommands, run via "-t XXX".
+ int ToolGraph(int argc, char* argv[]);
+ int ToolQuery(int argc, char* argv[]);
+ int ToolBrowse(int argc, char* argv[]);
+ int ToolMSVC(int argc, char* argv[]);
+ int ToolTargets(int argc, char* argv[]);
+ int ToolCommands(int argc, char* argv[]);
+ int ToolClean(int argc, char* argv[]);
+ int ToolCompilationDatabase(int argc, char* argv[]);
+ int ToolUrtle(int argc, char** argv);
+
+ /// Open the build log.
+ /// @return false on error.
+ bool OpenBuildLog();
+
+ /// Open the deps log: load it, then open for writing.
+ /// @return false on error.
+ bool OpenDepsLog();
+
+ /// Ensure the build directory exists, creating it if necessary.
+ /// @return false on error.
+ bool EnsureBuildDirExists();
+
+ /// Rebuild the manifest, if necessary.
+ /// Fills in \a err on error.
+ /// @return true if the manifest was rebuilt.
+ bool RebuildManifest(const char* input_file, string* err);
+
+ /// Build the targets listed on the command line.
+ /// @return an exit code.
+ int RunBuild(int argc, char** argv);
+
+ /// Dump the output requested by '-d stats'.
+ void DumpMetrics();
+};
+
+/// Subtools, accessible via "-t foo".
+struct Tool {
+ /// Short name of the tool.
+ const char* name;
+
+ /// Description (shown in "-t list").
+ const char* desc;
+
+ /// When to run the tool.
+ enum {
+ /// Run after parsing the command-line flags (as early as possible).
+ RUN_AFTER_FLAGS,
+
+ /// Run after loading build.ninja.
+ RUN_AFTER_LOAD,
+
+ /// Run after loading the build/deps logs.
+ RUN_AFTER_LOGS,
+ } when;
+
+ /// Implementation of the tool.
+ NinjaMain::ToolFunc func;
+};
+
+/// Print usage information.
+void Usage(const BuildConfig& config) {
+ fprintf(stderr,
+"usage: ninja [options] [targets...]\n"
+"\n"
+"if targets are unspecified, builds the 'default' target (see manual).\n"
+"\n"
+"options:\n"
+" --version print ninja version (\"%s\")\n"
+"\n"
+" -C DIR change to DIR before doing anything else\n"
+" -f FILE specify input build file [default=build.ninja]\n"
+"\n"
+" -j N run N jobs in parallel [default=%d, derived from CPUs available]\n"
+" -l N do not start new jobs if the load average is greater than N\n"
+#ifdef _WIN32
+" (not yet implemented on Windows)\n"
+#endif
+" -k N keep going until N jobs fail [default=1]\n"
+" -n dry run (don't run commands but act like they succeeded)\n"
+" -v show all command lines while building\n"
+"\n"
+" -d MODE enable debugging (use -d list to list modes)\n"
+" -t TOOL run a subtool (use -t list to list subtools)\n"
+" terminates toplevel options; further flags are passed to the tool\n",
+ kNinjaVersion, config.parallelism);
+}
+
+/// Choose a default value for the -j (parallelism) flag.
+int GuessParallelism() {
+ switch (int processors = GetProcessorCount()) {
+ case 0:
+ case 1:
+ return 2;
+ case 2:
+ return 3;
+ default:
+ return processors + 2;
+ }
+}
+
+/// An implementation of ManifestParser::FileReader that actually reads
+/// the file.
+struct RealFileReader : public ManifestParser::FileReader {
+ virtual bool ReadFile(const string& path, string* content, string* err) {
+ return ::ReadFile(path, content, err) == 0;
+ }
+};
+
+/// Rebuild the build manifest, if necessary.
+/// Returns true if the manifest was rebuilt.
+bool NinjaMain::RebuildManifest(const char* input_file, string* err) {
+ string path = input_file;
+ if (!CanonicalizePath(&path, err))
+ return false;
+ Node* node = state_.LookupNode(path);
+ if (!node)
+ return false;
+
+ Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_);
+ if (!builder.AddTarget(node, err))
+ return false;
+
+ if (builder.AlreadyUpToDate())
+ return false; // Not an error, but we didn't rebuild.
+ if (!builder.Build(err))
+ return false;
+
+ // The manifest was only rebuilt if it is now dirty (it may have been cleaned
+ // by a restat).
+ return node->dirty();
+}
+
+Node* NinjaMain::CollectTarget(const char* cpath, string* err) {
+ string path = cpath;
+ if (!CanonicalizePath(&path, err))
+ return NULL;
+
+ // Special syntax: "foo.cc^" means "the first output of foo.cc".
+ bool first_dependent = false;
+ if (!path.empty() && path[path.size() - 1] == '^') {
+ path.resize(path.size() - 1);
+ first_dependent = true;
+ }
+
+ Node* node = state_.LookupNode(path);
+ if (node) {
+ if (first_dependent) {
+ if (node->out_edges().empty()) {
+ *err = "'" + path + "' has no out edge";
+ return NULL;
+ }
+ Edge* edge = node->out_edges()[0];
+ if (edge->outputs_.empty()) {
+ edge->Dump();
+ Fatal("edge has no outputs");
+ }
+ node = edge->outputs_[0];
+ }
+ return node;
+ } else {
+ *err = "unknown target '" + path + "'";
+
+ if (path == "clean") {
+ *err += ", did you mean 'ninja -t clean'?";
+ } else if (path == "help") {
+ *err += ", did you mean 'ninja -h'?";
+ } else {
+ Node* suggestion = state_.SpellcheckNode(path);
+ if (suggestion) {
+ *err += ", did you mean '" + suggestion->path() + "'?";
+ }
+ }
+ return NULL;
+ }
+}
+
+bool NinjaMain::CollectTargetsFromArgs(int argc, char* argv[],
+ vector<Node*>* targets, string* err) {
+ if (argc == 0) {
+ *targets = state_.DefaultNodes(err);
+ return err->empty();
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ Node* node = CollectTarget(argv[i], err);
+ if (node == NULL)
+ return false;
+ targets->push_back(node);
+ }
+ return true;
+}
+
+int NinjaMain::ToolGraph(int argc, char* argv[]) {
+ vector<Node*> nodes;
+ string err;
+ if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
+ Error("%s", err.c_str());
+ return 1;
+ }
+
+ GraphViz graph;
+ graph.Start();
+ for (vector<Node*>::const_iterator n = nodes.begin(); n != nodes.end(); ++n)
+ graph.AddTarget(*n);
+ graph.Finish();
+
+ return 0;
+}
+
+int NinjaMain::ToolQuery(int argc, char* argv[]) {
+ if (argc == 0) {
+ Error("expected a target to query");
+ return 1;
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ string err;
+ Node* node = CollectTarget(argv[i], &err);
+ if (!node) {
+ Error("%s", err.c_str());
+ return 1;
+ }
+
+ printf("%s:\n", node->path().c_str());
+ if (Edge* edge = node->in_edge()) {
+ printf(" input: %s\n", edge->rule_->name().c_str());
+ for (int in = 0; in < (int)edge->inputs_.size(); in++) {
+ const char* label = "";
+ if (edge->is_implicit(in))
+ label = "| ";
+ else if (edge->is_order_only(in))
+ label = "|| ";
+ printf(" %s%s\n", label, edge->inputs_[in]->path().c_str());
+ }
+ }
+ printf(" outputs:\n");
+ for (vector<Edge*>::const_iterator edge = node->out_edges().begin();
+ edge != node->out_edges().end(); ++edge) {
+ for (vector<Node*>::iterator out = (*edge)->outputs_.begin();
+ out != (*edge)->outputs_.end(); ++out) {
+ printf(" %s\n", (*out)->path().c_str());
+ }
+ }
+ }
+ return 0;
+}
+
+#if !defined(_WIN32) && !defined(NINJA_BOOTSTRAP)
+int NinjaMain::ToolBrowse(int argc, char* argv[]) {
+ if (argc < 1) {
+ Error("expected a target to browse");
+ return 1;
+ }
+ RunBrowsePython(&state_, ninja_command_, argv[0]);
+ // If we get here, the browse failed.
+ return 1;
+}
+#endif // !defined(_WIN32) && !defined(NINJA_BOOTSTRAP)
+
+#if defined(_MSC_VER)
+int NinjaMain::ToolMSVC(int argc, char* argv[]) {
+ // Reset getopt: push one argument onto the front of argv, reset optind.
+ argc++;
+ argv--;
+ optind = 0;
+ return MSVCHelperMain(argc, argv);
+}
+#endif
+
+int ToolTargetsList(const vector<Node*>& nodes, int depth, int indent) {
+ for (vector<Node*>::const_iterator n = nodes.begin();
+ n != nodes.end();
+ ++n) {
+ for (int i = 0; i < indent; ++i)
+ printf(" ");
+ const char* target = (*n)->path().c_str();
+ if ((*n)->in_edge()) {
+ printf("%s: %s\n", target, (*n)->in_edge()->rule_->name().c_str());
+ if (depth > 1 || depth <= 0)
+ ToolTargetsList((*n)->in_edge()->inputs_, depth - 1, indent + 1);
+ } else {
+ printf("%s\n", target);
+ }
+ }
+ return 0;
+}
+
+int ToolTargetsSourceList(State* state) {
+ for (vector<Edge*>::iterator e = state->edges_.begin();
+ e != state->edges_.end(); ++e) {
+ for (vector<Node*>::iterator inps = (*e)->inputs_.begin();
+ inps != (*e)->inputs_.end(); ++inps) {
+ if (!(*inps)->in_edge())
+ printf("%s\n", (*inps)->path().c_str());
+ }
+ }
+ return 0;
+}
+
+int ToolTargetsList(State* state, const string& rule_name) {
+ set<string> rules;
+
+ // Gather the outputs.
+ for (vector<Edge*>::iterator e = state->edges_.begin();
+ e != state->edges_.end(); ++e) {
+ if ((*e)->rule_->name() == rule_name) {
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ rules.insert((*out_node)->path());
+ }
+ }
+ }
+
+ // Print them.
+ for (set<string>::const_iterator i = rules.begin();
+ i != rules.end(); ++i) {
+ printf("%s\n", (*i).c_str());
+ }
+
+ return 0;
+}
+
+int ToolTargetsList(State* state) {
+ for (vector<Edge*>::iterator e = state->edges_.begin();
+ e != state->edges_.end(); ++e) {
+ for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ printf("%s: %s\n",
+ (*out_node)->path().c_str(),
+ (*e)->rule_->name().c_str());
+ }
+ }
+ return 0;
+}
+
+int NinjaMain::ToolTargets(int argc, char* argv[]) {
+ int depth = 1;
+ if (argc >= 1) {
+ string mode = argv[0];
+ if (mode == "rule") {
+ string rule;
+ if (argc > 1)
+ rule = argv[1];
+ if (rule.empty())
+ return ToolTargetsSourceList(&state_);
+ else
+ return ToolTargetsList(&state_, rule);
+ } else if (mode == "depth") {
+ if (argc > 1)
+ depth = atoi(argv[1]);
+ } else if (mode == "all") {
+ return ToolTargetsList(&state_);
+ } else {
+ const char* suggestion =
+ SpellcheckString(mode.c_str(), "rule", "depth", "all", NULL);
+ if (suggestion) {
+ Error("unknown target tool mode '%s', did you mean '%s'?",
+ mode.c_str(), suggestion);
+ } else {
+ Error("unknown target tool mode '%s'", mode.c_str());
+ }
+ return 1;
+ }
+ }
+
+ string err;
+ vector<Node*> root_nodes = state_.RootNodes(&err);
+ if (err.empty()) {
+ return ToolTargetsList(root_nodes, depth, 0);
+ } else {
+ Error("%s", err.c_str());
+ return 1;
+ }
+}
+
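+/// Recursively print the commands that build \a edge's inputs, then the
+/// edge's own command. Edges already in \a seen are not revisited, and
+/// phony edges contribute no command of their own.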
+void PrintCommands(Edge* edge, set<Edge*>* seen) {
+ if (!edge)
+ return;
+ if (!seen->insert(edge).second)
+ return;
+
+ for (vector<Node*>::iterator in = edge->inputs_.begin();
+ in != edge->inputs_.end(); ++in)
+ PrintCommands((*in)->in_edge(), seen);
+
+ if (!edge->is_phony())
+ puts(edge->EvaluateCommand().c_str());
+}
+
+int NinjaMain::ToolCommands(int argc, char* argv[]) {
+ vector<Node*> nodes;
+ string err;
+ if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) {
+ Error("%s", err.c_str());
+ return 1;
+ }
+
+ set<Edge*> seen;
+ for (vector<Node*>::iterator in = nodes.begin(); in != nodes.end(); ++in)
+ PrintCommands((*in)->in_edge(), &seen);
+
+ return 0;
+}
+
+int NinjaMain::ToolClean(int argc, char* argv[]) {
+ // The clean tool uses getopt, and expects argv[0] to contain the name of
+ // the tool, i.e. "clean".
+ argc++;
+ argv--;
+
+ bool generator = false;
+ bool clean_rules = false;
+
+ optind = 1;
+ int opt;
+ while ((opt = getopt(argc, argv, const_cast<char*>("hgr"))) != -1) {
+ switch (opt) {
+ case 'g':
+ generator = true;
+ break;
+ case 'r':
+ clean_rules = true;
+ break;
+ case 'h':
+ default:
+ printf("usage: ninja -t clean [options] [targets]\n"
+"\n"
+"options:\n"
+" -g also clean files marked as ninja generator output\n"
+" -r interpret targets as a list of rules to clean instead\n"
+ );
+ return 1;
+ }
+ }
+ argv += optind;
+ argc -= optind;
+
+ if (clean_rules && argc == 0) {
+ Error("expected a rule to clean");
+ return 1;
+ }
+
+ Cleaner cleaner(&state_, config_);
+ if (argc >= 1) {
+ if (clean_rules)
+ return cleaner.CleanRules(argc, argv);
+ else
+ return cleaner.CleanTargets(argc, argv);
+ } else {
+ return cleaner.CleanAll(generator);
+ }
+}
+
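+/// Print \a str to stdout as the body of a JSON string literal, escaping
+/// backslashes and double quotes (used when emitting the compilation
+/// database below).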
+void EncodeJSONString(const char *str) {
+ while (*str) {
+ if (*str == '"' || *str == '\\')
+ putchar('\\');
+ putchar(*str);
+ str++;
+ }
+}
+
+int NinjaMain::ToolCompilationDatabase(int argc, char* argv[]) {
+ bool first = true;
+ vector<char> cwd;
+
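+ // Grow the buffer until getcwd() fits the path; ERANGE means it was too
+ // small.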
+ do {
+ cwd.resize(cwd.size() + 1024);
+ errno = 0;
+ } while (!getcwd(&cwd[0], cwd.size()) && errno == ERANGE);
+ if (errno != 0 && errno != ERANGE) {
+ Error("cannot determine working directory: %s", strerror(errno));
+ return 1;
+ }
+
+ putchar('[');
+ for (vector<Edge*>::iterator e = state_.edges_.begin();
+ e != state_.edges_.end(); ++e) {
+ for (int i = 0; i != argc; ++i) {
+ if ((*e)->rule_->name() == argv[i]) {
+ if (!first)
+ putchar(',');
+
+ printf("\n {\n \"directory\": \"");
+ EncodeJSONString(&cwd[0]);
+ printf("\",\n \"command\": \"");
+ EncodeJSONString((*e)->EvaluateCommand().c_str());
+ printf("\",\n \"file\": \"");
+ EncodeJSONString((*e)->inputs_[0]->path().c_str());
+ printf("\"\n }");
+
+ first = false;
+ }
+ }
+ }
+
+ puts("\n]");
+ return 0;
+}
+
+int NinjaMain::ToolUrtle(int argc, char** argv) {
+ // RLE encoded.
+ const char* urtle =
+" 13 ,3;2!2;\n8 ,;<11!;\n5 `'<10!(2`'2!\n11 ,6;, `\\. `\\9 .,c13$ec,.\n6 "
+",2;11!>; `. ,;!2> .e8$2\".2 \"?7$e.\n <:<8!'` 2.3,.2` ,3!' ;,(?7\";2!2'<"
+"; `?6$PF ,;,\n2 `'4!8;<!3'`2 3! ;,`'2`2'3!;4!`2.`!;2 3,2 .<!2'`).\n5 3`5"
+"'2`9 `!2 `4!><3;5! J2$b,`!>;2!:2!`,d?b`!>\n26 `'-;,(<9!> $F3 )3.:!.2 d\""
+"2 ) !>\n30 7`2'<3!- \"=-='5 .2 `2-=\",!>\n25 .ze9$er2 .,cd16$bc.'\n22 .e"
+"14$,26$.\n21 z45$c .\n20 J50$c\n20 14$P\"`?34$b\n20 14$ dbc `2\"?22$?7$c"
+"\n20 ?18$c.6 4\"8?4\" c8$P\n9 .2,.8 \"20$c.3 ._14 J9$\n .2,2c9$bec,.2 `?"
+"21$c.3`4%,3%,3 c8$P\"\n22$c2 2\"?21$bc2,.2` .2,c7$P2\",cb\n23$b bc,.2\"2"
+"?14$2F2\"5?2\",J5$P\" ,zd3$\n24$ ?$3?%3 `2\"2?12$bcucd3$P3\"2 2=7$\n23$P"
+"\" ,3;<5!>2;,. `4\"6?2\"2 ,9;, `\"?2$\n";
+ int count = 0;
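+ // Decode the RLE data: runs of digits accumulate a repeat count, and any
+ // other character is printed max(count, 1) times.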
+ for (const char* p = urtle; *p; p++) {
+ if ('0' <= *p && *p <= '9') {
+ count = count*10 + *p - '0';
+ } else {
+ for (int i = 0; i < std::max(count, 1); ++i)
+ printf("%c", *p);
+ count = 0;
+ }
+ }
+ return 0;
+}
+
+/// Choose the tool to run for \a tool_name.
+/// Returns a Tool, or NULL if Ninja should exit.
+const Tool* ChooseTool(const string& tool_name) {
+ static const Tool kTools[] = {
+#if !defined(_WIN32) && !defined(NINJA_BOOTSTRAP)
+ { "browse", "browse dependency graph in a web browser",
+ Tool::RUN_AFTER_LOAD, &NinjaMain::ToolBrowse },
+#endif
+#if defined(_MSC_VER)
+ { "msvc", "build helper for MSVC cl.exe (EXPERIMENTAL)",
+ Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolMSVC },
+#endif
+ { "clean", "clean built files",
+ Tool::RUN_AFTER_LOAD, &NinjaMain::ToolClean },
+ { "commands", "list all commands required to rebuild given targets",
+ Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCommands },
+ { "graph", "output graphviz dot file for targets",
+ Tool::RUN_AFTER_LOAD, &NinjaMain::ToolGraph },
+ { "query", "show inputs/outputs for a path",
+ Tool::RUN_AFTER_LOGS, &NinjaMain::ToolQuery },
+ { "targets", "list targets by their rule or depth in the DAG",
+ Tool::RUN_AFTER_LOAD, &NinjaMain::ToolTargets },
+ { "compdb", "dump JSON compilation database to stdout",
+ Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCompilationDatabase },
+ { "urtle", NULL,
+ Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolUrtle },
+ { NULL, NULL, Tool::RUN_AFTER_FLAGS, NULL }
+ };
+
+ if (tool_name == "list") {
+ printf("ninja subtools:\n");
+ for (const Tool* tool = &kTools[0]; tool->name; ++tool) {
+ if (tool->desc)
+ printf("%10s %s\n", tool->name, tool->desc);
+ }
+ return NULL;
+ }
+
+ for (const Tool* tool = &kTools[0]; tool->name; ++tool) {
+ if (tool->name == tool_name)
+ return tool;
+ }
+
+ vector<const char*> words;
+ for (const Tool* tool = &kTools[0]; tool->name; ++tool)
+ words.push_back(tool->name);
+ const char* suggestion = SpellcheckStringV(tool_name, words);
+ if (suggestion) {
+ Fatal("unknown tool '%s', did you mean '%s'?",
+ tool_name.c_str(), suggestion);
+ } else {
+ Fatal("unknown tool '%s'", tool_name.c_str());
+ }
+ return NULL; // Not reached.
+}
+
+/// Enable a debugging mode. Returns false if Ninja should exit instead
+/// of continuing.
+bool DebugEnable(const string& name) {
+ if (name == "list") {
+ printf("debugging modes:\n"
+" stats print operation counts/timing info\n"
+" explain explain what caused a command to execute\n"
+"multiple modes can be enabled via -d FOO -d BAR\n");
+ return false;
+ } else if (name == "stats") {
+ g_metrics = new Metrics;
+ return true;
+ } else if (name == "explain") {
+ g_explaining = true;
+ return true;
+ } else {
+ const char* suggestion =
+ SpellcheckString(name.c_str(), "stats", "explain", NULL);
+ if (suggestion) {
+ Error("unknown debug setting '%s', did you mean '%s'?",
+ name.c_str(), suggestion);
+ } else {
+ Error("unknown debug setting '%s'", name.c_str());
+ }
+ return false;
+ }
+}
+
+bool NinjaMain::OpenBuildLog() {
+ string log_path = ".ninja_log";
+ if (!build_dir_.empty())
+ log_path = build_dir_ + "/" + log_path;
+
+ string err;
+ if (!build_log_.Load(log_path, &err)) {
+ Error("loading build log %s: %s", log_path.c_str(), err.c_str());
+ return false;
+ }
+ if (!err.empty()) {
+ // Hack: Load() can return a warning via err by returning true.
+ Warning("%s", err.c_str());
+ err.clear();
+ }
+
+ if (!config_.dry_run) {
+ if (!build_log_.OpenForWrite(log_path, &err)) {
+ Error("opening build log: %s", err.c_str());
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// Open the deps log: load it, then open for writing.
+/// @return false on error.
+bool NinjaMain::OpenDepsLog() {
+ string path = ".ninja_deps";
+ if (!build_dir_.empty())
+ path = build_dir_ + "/" + path;
+
+ string err;
+ if (!deps_log_.Load(path, &state_, &err)) {
+ Error("loading deps log %s: %s", path.c_str(), err.c_str());
+ return false;
+ }
+ if (!err.empty()) {
+ // Hack: Load() can return a warning via err by returning true.
+ Warning("%s", err.c_str());
+ err.clear();
+ }
+
+ if (!config_.dry_run) {
+ if (!deps_log_.OpenForWrite(path, &err)) {
+ Error("opening deps log: %s", err.c_str());
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void NinjaMain::DumpMetrics() {
+ g_metrics->Report();
+
+ printf("\n");
+ int count = (int)state_.paths_.size();
+ int buckets = (int)state_.paths_.bucket_count();
+ printf("path->node hash load %.2f (%d entries / %d buckets)\n",
+ count / (double) buckets, count, buckets);
+}
+
+bool NinjaMain::EnsureBuildDirExists() {
+ build_dir_ = state_.bindings_.LookupVariable("builddir");
+ if (!build_dir_.empty() && !config_.dry_run) {
+ if (!disk_interface_.MakeDirs(build_dir_ + "/.") && errno != EEXIST) {
+ Error("creating build directory %s: %s",
+ build_dir_.c_str(), strerror(errno));
+ return false;
+ }
+ }
+ return true;
+}
+
+int NinjaMain::RunBuild(int argc, char** argv) {
+ string err;
+ vector<Node*> targets;
+ if (!CollectTargetsFromArgs(argc, argv, &targets, &err)) {
+ Error("%s", err.c_str());
+ return 1;
+ }
+
+ Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_);
+ for (size_t i = 0; i < targets.size(); ++i) {
+ if (!builder.AddTarget(targets[i], &err)) {
+ if (!err.empty()) {
+ Error("%s", err.c_str());
+ return 1;
+ } else {
+ // Added a target that is already up-to-date; not really
+ // an error.
+ }
+ }
+ }
+
+ if (builder.AlreadyUpToDate()) {
+ printf("ninja: no work to do.\n");
+ return 0;
+ }
+
+ if (!builder.Build(&err)) {
+ printf("ninja: build stopped: %s.\n", err.c_str());
+ if (err.find("interrupted by user") != string::npos) {
+ return 2;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef _MSC_VER
+
+/// This handler processes fatal crashes that you can't catch.
+/// Test example: a C++ exception in a stack-unwind block.
+/// Real-world example: ninja launched a compiler to process a tricky
+/// C++ input file. The compiler got itself into a state where it
+/// generated 3 GB of output and caused ninja to crash.
+void TerminateHandler() {
+ CreateWin32MiniDump(NULL);
+ Fatal("terminate handler called");
+}
+
+/// On Windows, we want to prevent error dialogs in case of exceptions.
+/// This function handles the exception, and writes a minidump.
+int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep) {
+ Error("exception: 0x%X", code); // e.g. EXCEPTION_ACCESS_VIOLATION
+ fflush(stderr);
+ CreateWin32MiniDump(ep);
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+#endif // _MSC_VER
+
+/// Parse argv for command-line options.
+/// Returns an exit code, or -1 if Ninja should continue.
+int ReadFlags(int* argc, char*** argv,
+ Options* options, BuildConfig* config) {
+ config->parallelism = GuessParallelism();
+
+ enum { OPT_VERSION = 1 };
+ const option kLongOptions[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, OPT_VERSION },
+ { NULL, 0, NULL, 0 }
+ };
+
+ int opt;
+ while (!options->tool &&
+ (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vC:h", kLongOptions,
+ NULL)) != -1) {
+ switch (opt) {
+ case 'd':
+ if (!DebugEnable(optarg))
+ return 1;
+ break;
+ case 'f':
+ options->input_file = optarg;
+ break;
+ case 'j': {
+ char* end;
+ int value = strtol(optarg, &end, 10);
+ if (*end != 0 || value <= 0)
+ Fatal("invalid -j parameter");
+ config->parallelism = value;
+ break;
+ }
+ case 'k': {
+ char* end;
+ int value = strtol(optarg, &end, 10);
+ if (*end != 0)
+ Fatal("-k parameter not numeric; did you mean -k 0?");
+
+ // We want to go until N jobs fail, which means we should allow
+ // N failures and then stop. For N <= 0, INT_MAX is close enough
+ // to infinite for most sane builds.
+ config->failures_allowed = value > 0 ? value : INT_MAX;
+ break;
+ }
+ case 'l': {
+ char* end;
+ double value = strtod(optarg, &end);
+ if (end == optarg)
+ Fatal("-l parameter not numeric: did you mean -l 0.0?");
+ config->max_load_average = value;
+ break;
+ }
+ case 'n':
+ config->dry_run = true;
+ break;
+ case 't':
+ options->tool = ChooseTool(optarg);
+ if (!options->tool)
+ return 0;
+ break;
+ case 'v':
+ config->verbosity = BuildConfig::VERBOSE;
+ break;
+ case 'C':
+ options->working_dir = optarg;
+ break;
+ case OPT_VERSION:
+ printf("%s\n", kNinjaVersion);
+ return 0;
+ case 'h':
+ default:
+ Usage(*config);
+ return 1;
+ }
+ }
+ *argv += optind;
+ *argc -= optind;
+
+ return -1;
+}
+
+int real_main(int argc, char** argv) {
+ BuildConfig config;
+ Options options = {};
+ options.input_file = "build.ninja";
+
+ setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
+
+ int exit_code = ReadFlags(&argc, &argv, &options, &config);
+ if (exit_code >= 0)
+ return exit_code;
+
+ if (options.tool && options.tool->when == Tool::RUN_AFTER_FLAGS) {
+ // None of the RUN_AFTER_FLAGS tools use NinjaMain's loaded state, but an
+ // instance is still needed to invoke the tool's member function.
+ NinjaMain ninja(argv[0], config);
+ return (ninja.*options.tool->func)(argc, argv);
+ }
+
+ if (options.working_dir) {
+ // The formatting of this string, complete with funny quotes, is
+ // so Emacs can properly identify that the cwd has changed for
+ // subsequent commands.
+ // Don't print this if a tool is being used, so that tool output
+ // can be piped into a file without this string showing up.
+ if (!options.tool)
+ printf("ninja: Entering directory `%s'\n", options.working_dir);
+ if (chdir(options.working_dir) < 0) {
+ Fatal("chdir to '%s' - %s", options.working_dir, strerror(errno));
+ }
+ }
+
+ // The build can take up to 2 passes: one to rebuild the manifest, then
+ // another to build the desired target.
+ for (int cycle = 0; cycle < 2; ++cycle) {
+ NinjaMain ninja(argv[0], config);
+
+ RealFileReader file_reader;
+ ManifestParser parser(&ninja.state_, &file_reader);
+ string err;
+ if (!parser.Load(options.input_file, &err)) {
+ Error("%s", err.c_str());
+ return 1;
+ }
+
+ if (options.tool && options.tool->when == Tool::RUN_AFTER_LOAD)
+ return (ninja.*options.tool->func)(argc, argv);
+
+ if (!ninja.EnsureBuildDirExists())
+ return 1;
+
+ if (!ninja.OpenBuildLog() || !ninja.OpenDepsLog())
+ return 1;
+
+ if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS)
+ return (ninja.*options.tool->func)(argc, argv);
+
+ // The first time through, attempt to rebuild the manifest before
+ // building anything else.
+ if (cycle == 0) {
+ if (ninja.RebuildManifest(options.input_file, &err)) {
+ // Start the build over with the new manifest.
+ continue;
+ } else if (!err.empty()) {
+ Error("rebuilding '%s': %s", options.input_file, err.c_str());
+ return 1;
+ }
+ }
+
+ int result = ninja.RunBuild(argc, argv);
+ if (g_metrics)
+ ninja.DumpMetrics();
+ return result;
+ }
+
+ return 1; // Shouldn't be reached.
+}
+
+} // anonymous namespace
+
+int main(int argc, char** argv) {
+#if !defined(NINJA_BOOTSTRAP) && defined(_MSC_VER)
+ // Set a handler to catch crashes not caught by the __try..__except
+ // block (e.g. an exception in a stack-unwind-block).
+ set_terminate(TerminateHandler);
+ __try {
+ // Running inside __try ... __except suppresses any Windows error
+ // dialogs for errors such as bad_alloc.
+ return real_main(argc, argv);
+ }
+ __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) {
+ // Common error situations return exitCode=1. 2 was chosen to
+ // indicate a more serious problem.
+ return 2;
+ }
+#else
+ return real_main(argc, argv);
+#endif
+}
diff --git a/ninja/src/ninja_test.cc b/ninja/src/ninja_test.cc
new file mode 100644
index 00000000000..989ea5c72e2
--- /dev/null
+++ b/ninja/src/ninja_test.cc
@@ -0,0 +1,88 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+#include "line_printer.h"
+
+string StringPrintf(const char* format, ...) {
+ const int N = 1024;
+ char buf[N];
+
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(buf, N, format, ap);
+ va_end(ap);
+
+ return buf;
+}
+
+/// A test result printer that's less wordy than gtest's default.
+struct LaconicPrinter : public testing::EmptyTestEventListener {
+ LaconicPrinter() : tests_started_(0), test_count_(0), iteration_(0) {}
+ virtual void OnTestProgramStart(const testing::UnitTest& unit_test) {
+ test_count_ = unit_test.test_to_run_count();
+ }
+
+ virtual void OnTestIterationStart(const testing::UnitTest& test_info,
+ int iteration) {
+ tests_started_ = 0;
+ iteration_ = iteration;
+ }
+
+ virtual void OnTestStart(const testing::TestInfo& test_info) {
+ ++tests_started_;
+ printer_.Print(
+ StringPrintf("[%d/%d%s] %s.%s",
+ tests_started_,
+ test_count_,
+ iteration_ ? StringPrintf(" iter %d", iteration_).c_str()
+ : "",
+ test_info.test_case_name(),
+ test_info.name()),
+ LinePrinter::ELIDE);
+ }
+
+ virtual void OnTestPartResult(
+ const testing::TestPartResult& test_part_result) {
+ if (!test_part_result.failed())
+ return;
+ printer_.PrintOnNewLine(StringPrintf(
+ "*** Failure in %s:%d\n%s\n", test_part_result.file_name(),
+ test_part_result.line_number(), test_part_result.summary()));
+ }
+
+ virtual void OnTestProgramEnd(const testing::UnitTest& unit_test) {
+ printer_.PrintOnNewLine(unit_test.Passed() ? "passed\n" : "failed\n");
+ }
+
+ private:
+ LinePrinter printer_;
+ int tests_started_;
+ int test_count_;
+ int iteration_;
+};
+
+int main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
+
+ testing::TestEventListeners& listeners =
+ testing::UnitTest::GetInstance()->listeners();
+ delete listeners.Release(listeners.default_result_printer());
+ listeners.Append(new LaconicPrinter);
+
+ return RUN_ALL_TESTS();
+}
diff --git a/ninja/src/parser_perftest.cc b/ninja/src/parser_perftest.cc
new file mode 100644
index 00000000000..b21522168df
--- /dev/null
+++ b/ninja/src/parser_perftest.cc
@@ -0,0 +1,77 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "depfile_parser.h"
+#include "util.h"
+#include "metrics.h"
+
+int main(int argc, char* argv[]) {
+ if (argc < 2) {
+ printf("usage: %s <file1> <file2...>\n", argv[0]);
+ return 1;
+ }
+
+ vector<float> times;
+ for (int i = 1; i < argc; ++i) {
+ const char* filename = argv[i];
+
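+ // Double the repetition count until one timed run exceeds 100ms, so the
+ // per-parse time reported below is averaged over a meaningful interval.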
+ for (int limit = 1 << 10; limit < (1<<20); limit *= 2) {
+ int64_t start = GetTimeMillis();
+ for (int rep = 0; rep < limit; ++rep) {
+ string buf;
+ string err;
+ if (ReadFile(filename, &buf, &err) < 0) {
+ printf("%s: %s\n", filename, err.c_str());
+ return 1;
+ }
+
+ DepfileParser parser;
+ if (!parser.Parse(&buf, &err)) {
+ printf("%s: %s\n", filename, err.c_str());
+ return 1;
+ }
+ }
+ int64_t end = GetTimeMillis();
+
+ if (end - start > 100) {
+ int delta = (int)(end - start);
+ float time = delta*1000 / (float)limit;
+ printf("%s: %.1fus\n", filename, time);
+ times.push_back(time);
+ break;
+ }
+ }
+ }
+
+ if (!times.empty()) {
+ float min = times[0];
+ float max = times[0];
+ float total = 0;
+ for (size_t i = 0; i < times.size(); ++i) {
+ total += times[i];
+ if (times[i] < min)
+ min = times[i];
+ else if (times[i] > max)
+ max = times[i];
+ }
+
+ printf("min %.1fus max %.1fus avg %.1fus\n",
+ min, max, total / times.size());
+ }
+
+ return 0;
+}
diff --git a/ninja/src/state.cc b/ninja/src/state.cc
new file mode 100644
index 00000000000..9b6160bc2ca
--- /dev/null
+++ b/ninja/src/state.cc
@@ -0,0 +1,222 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "state.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "edit_distance.h"
+#include "graph.h"
+#include "metrics.h"
+#include "util.h"
+
+
+void Pool::EdgeScheduled(const Edge& edge) {
+ if (depth_ != 0)
+ current_use_ += edge.weight();
+}
+
+void Pool::EdgeFinished(const Edge& edge) {
+ if (depth_ != 0)
+ current_use_ -= edge.weight();
+}
+
+void Pool::DelayEdge(Edge* edge) {
+ assert(depth_ != 0);
+ delayed_.insert(edge);
+}
+
+void Pool::RetrieveReadyEdges(set<Edge*>* ready_queue) {
+ DelayedEdges::iterator it = delayed_.begin();
+ while (it != delayed_.end()) {
+ Edge* edge = *it;
+ if (current_use_ + edge->weight() > depth_)
+ break;
+ ready_queue->insert(edge);
+ EdgeScheduled(*edge);
+ ++it;
+ }
+ delayed_.erase(delayed_.begin(), it);
+}
+
+void Pool::Dump() const {
+ printf("%s (%d/%d) ->\n", name_.c_str(), current_use_, depth_);
+ for (DelayedEdges::const_iterator it = delayed_.begin();
+ it != delayed_.end(); ++it)
+ {
+ printf("\t");
+ (*it)->Dump();
+ }
+}
+
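+/// Order delayed edges by ascending weight, breaking ties by pointer so the
+/// comparison is a strict weak ordering suitable for the delayed_ set.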
+bool Pool::WeightedEdgeCmp(const Edge* a, const Edge* b) {
+ if (!a) return b != NULL;
+ if (!b) return false;
+ int weight_diff = a->weight() - b->weight();
+ return ((weight_diff < 0) || (weight_diff == 0 && a < b));
+}
+
+Pool State::kDefaultPool("", 0);
+const Rule State::kPhonyRule("phony");
+
+State::State() {
+ AddRule(&kPhonyRule);
+ AddPool(&kDefaultPool);
+}
+
+void State::AddRule(const Rule* rule) {
+ assert(LookupRule(rule->name()) == NULL);
+ rules_[rule->name()] = rule;
+}
+
+const Rule* State::LookupRule(const string& rule_name) {
+ map<string, const Rule*>::iterator i = rules_.find(rule_name);
+ if (i == rules_.end())
+ return NULL;
+ return i->second;
+}
+
+void State::AddPool(Pool* pool) {
+ assert(LookupPool(pool->name()) == NULL);
+ pools_[pool->name()] = pool;
+}
+
+Pool* State::LookupPool(const string& pool_name) {
+ map<string, Pool*>::iterator i = pools_.find(pool_name);
+ if (i == pools_.end())
+ return NULL;
+ return i->second;
+}
+
+Edge* State::AddEdge(const Rule* rule) {
+ Edge* edge = new Edge();
+ edge->rule_ = rule;
+ edge->pool_ = &State::kDefaultPool;
+ edge->env_ = &bindings_;
+ edges_.push_back(edge);
+ return edge;
+}
+
+Node* State::GetNode(StringPiece path) {
+ Node* node = LookupNode(path);
+ if (node)
+ return node;
+ node = new Node(path.AsString());
+ paths_[node->path()] = node;
+ return node;
+}
+
+Node* State::LookupNode(StringPiece path) {
+ METRIC_RECORD("lookup node");
+ Paths::iterator i = paths_.find(path);
+ if (i != paths_.end())
+ return i->second;
+ return NULL;
+}
+
+Node* State::SpellcheckNode(const string& path) {
+ const bool kAllowReplacements = true;
+ const int kMaxValidEditDistance = 3;
+
+ int min_distance = kMaxValidEditDistance + 1;
+ Node* result = NULL;
+ for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) {
+ int distance = EditDistance(
+ i->first, path, kAllowReplacements, kMaxValidEditDistance);
+ if (distance < min_distance && i->second) {
+ min_distance = distance;
+ result = i->second;
+ }
+ }
+ return result;
+}
+
+void State::AddIn(Edge* edge, StringPiece path) {
+ Node* node = GetNode(path);
+ edge->inputs_.push_back(node);
+ node->AddOutEdge(edge);
+}
+
+void State::AddOut(Edge* edge, StringPiece path) {
+ Node* node = GetNode(path);
+ edge->outputs_.push_back(node);
+ if (node->in_edge()) {
+ Warning("multiple rules generate %s. "
+ "builds involving this target will not be correct; "
+ "continuing anyway",
+ path.AsString().c_str());
+ }
+ node->set_in_edge(edge);
+}
+
+bool State::AddDefault(StringPiece path, string* err) {
+ Node* node = LookupNode(path);
+ if (!node) {
+ *err = "unknown target '" + path.AsString() + "'";
+ return false;
+ }
+ defaults_.push_back(node);
+ return true;
+}
+
+vector<Node*> State::RootNodes(string* err) {
+ vector<Node*> root_nodes;
+ // Search for nodes with no output.
+ for (vector<Edge*>::iterator e = edges_.begin(); e != edges_.end(); ++e) {
+ for (vector<Node*>::iterator out = (*e)->outputs_.begin();
+ out != (*e)->outputs_.end(); ++out) {
+ if ((*out)->out_edges().empty())
+ root_nodes.push_back(*out);
+ }
+ }
+
+ if (!edges_.empty() && root_nodes.empty())
+ *err = "could not determine root nodes of build graph";
+
+ assert(edges_.empty() || !root_nodes.empty());
+ return root_nodes;
+}
+
+vector<Node*> State::DefaultNodes(string* err) {
+ return defaults_.empty() ? RootNodes(err) : defaults_;
+}
+
+void State::Reset() {
+ for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i)
+ i->second->ResetState();
+ for (vector<Edge*>::iterator e = edges_.begin(); e != edges_.end(); ++e)
+ (*e)->outputs_ready_ = false;
+}
+
+void State::Dump() {
+ for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) {
+ Node* node = i->second;
+ printf("%s %s [id:%d]\n",
+ node->path().c_str(),
+ node->status_known() ? (node->dirty() ? "dirty" : "clean")
+ : "unknown",
+ node->id());
+ }
+ if (!pools_.empty()) {
+ printf("resource_pools:\n");
+ for (map<string, Pool*>::const_iterator it = pools_.begin();
+ it != pools_.end(); ++it)
+ {
+ if (!it->second->name().empty()) {
+ it->second->Dump();
+ }
+ }
+ }
+}
diff --git a/ninja/src/state.h b/ninja/src/state.h
new file mode 100644
index 00000000000..bde75ff774f
--- /dev/null
+++ b/ninja/src/state.h
@@ -0,0 +1,134 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_STATE_H_
+#define NINJA_STATE_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+using namespace std;
+
+#include "eval_env.h"
+#include "hash_map.h"
+
+struct Edge;
+struct Node;
+struct Rule;
+
+/// A pool for delayed edges.
+/// Pools are scoped to a State. Edges within a State will share Pools. A Pool
+/// will keep a count of the total 'weight' of the currently scheduled edges. If
+/// a Plan attempts to schedule an Edge which would cause the total weight to
+/// exceed the depth of the Pool, the Pool will enqueue the Edge instead of
+/// allowing the Plan to schedule it. The Pool will relinquish queued Edges when
+/// the total scheduled weight diminishes enough (i.e. when a scheduled edge
+/// completes).
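+///
+/// Rough usage sketch (how a scheduler is expected to drive a Pool): call
+/// DelayEdge() for work that would exceed the depth, EdgeScheduled() and
+/// EdgeFinished() around edges that actually run, and RetrieveReadyEdges()
+/// after a completion to collect any delayed edges that now fit.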
+struct Pool {
+ explicit Pool(const string& name, int depth)
+ : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) { }
+
+ // A depth of 0 is infinite
+ bool is_valid() const { return depth_ >= 0; }
+ int depth() const { return depth_; }
+ const string& name() const { return name_; }
+
+ /// true if the Pool might delay this edge
+ bool ShouldDelayEdge() const { return depth_ != 0; }
+
+ /// informs this Pool that the given edge is committed to be run.
+ /// Pool will count this edge as using resources from this pool.
+ void EdgeScheduled(const Edge& edge);
+
+ /// informs this Pool that the given edge is no longer runnable, and should
+ /// relinquish its resources back to the pool
+ void EdgeFinished(const Edge& edge);
+
+ /// adds the given edge to this Pool to be delayed.
+ void DelayEdge(Edge* edge);
+
+ /// Pool will add zero or more edges to the ready_queue
+ void RetrieveReadyEdges(set<Edge*>* ready_queue);
+
+ /// Dump the Pool and its edges (useful for debugging).
+ void Dump() const;
+
+ private:
+ string name_;
+
+ /// |current_use_| is the total of the weights of the edges which are
+ /// currently scheduled in the Plan (i.e. the edges in Plan::ready_).
+ int current_use_;
+ int depth_;
+
+ static bool WeightedEdgeCmp(const Edge* a, const Edge* b);
+
+ typedef set<Edge*,bool(*)(const Edge*, const Edge*)> DelayedEdges;
+ DelayedEdges delayed_;
+};
+
+/// Global state (file status, loaded rules) for a single run.
+struct State {
+ static Pool kDefaultPool;
+ static const Rule kPhonyRule;
+
+ State();
+
+ void AddRule(const Rule* rule);
+ const Rule* LookupRule(const string& rule_name);
+
+ void AddPool(Pool* pool);
+ Pool* LookupPool(const string& pool_name);
+
+ Edge* AddEdge(const Rule* rule);
+
+ Node* GetNode(StringPiece path);
+ Node* LookupNode(StringPiece path);
+ Node* SpellcheckNode(const string& path);
+
+ void AddIn(Edge* edge, StringPiece path);
+ void AddOut(Edge* edge, StringPiece path);
+ bool AddDefault(StringPiece path, string* error);
+
+ /// Reset state. Keeps all nodes and edges, but restores them to the
+ /// state where we haven't yet examined the disk for dirty state.
+ void Reset();
+
+ /// Dump the nodes and Pools (useful for debugging).
+ void Dump();
+
+ /// @return the root node(s) of the graph. (Root nodes have no output edges).
+ /// @param error where to write the error message if something went wrong.
+ vector<Node*> RootNodes(string* error);
+ vector<Node*> DefaultNodes(string* error);
+
+ /// Mapping of path -> Node.
+ typedef ExternalStringHashMap<Node*>::Type Paths;
+ Paths paths_;
+
+ /// All the rules used in the graph.
+ map<string, const Rule*> rules_;
+
+ /// All the pools used in the graph.
+ map<string, Pool*> pools_;
+
+ /// All the edges of the graph.
+ vector<Edge*> edges_;
+
+ BindingEnv bindings_;
+ vector<Node*> defaults_;
+};
+
+#endif // NINJA_STATE_H_
diff --git a/ninja/src/state_test.cc b/ninja/src/state_test.cc
new file mode 100644
index 00000000000..af2bff19b36
--- /dev/null
+++ b/ninja/src/state_test.cc
@@ -0,0 +1,47 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include "graph.h"
+#include "state.h"
+
+namespace {
+
+TEST(State, Basic) {
+ State state;
+
+ EvalString command;
+ command.AddText("cat ");
+ command.AddSpecial("in");
+ command.AddText(" > ");
+ command.AddSpecial("out");
+
+ Rule* rule = new Rule("cat");
+ rule->AddBinding("command", command);
+ state.AddRule(rule);
+
+ Edge* edge = state.AddEdge(rule);
+ state.AddIn(edge, "in1");
+ state.AddIn(edge, "in2");
+ state.AddOut(edge, "out");
+
+ EXPECT_EQ("cat in1 in2 > out", edge->EvaluateCommand());
+
+ EXPECT_FALSE(state.GetNode("in1")->dirty());
+ EXPECT_FALSE(state.GetNode("in2")->dirty());
+ EXPECT_FALSE(state.GetNode("out")->dirty());
+}
+
+} // namespace
diff --git a/ninja/src/string_piece.h b/ninja/src/string_piece.h
new file mode 100644
index 00000000000..b1bf105dbe4
--- /dev/null
+++ b/ninja/src/string_piece.h
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_STRINGPIECE_H_
+#define NINJA_STRINGPIECE_H_
+
+#include <string>
+
+using namespace std;
+
+#include <string.h>
+
+/// StringPiece represents a slice of a string whose memory is managed
+/// externally. It is useful for reducing the number of std::strings
+/// we need to allocate.
+struct StringPiece {
+ StringPiece() : str_(NULL), len_(0) {}
+
+ /// The constructors intentionally allow for implicit conversions.
+ StringPiece(const string& str) : str_(str.data()), len_(str.size()) {}
+ StringPiece(const char* str) : str_(str), len_(strlen(str)) {}
+
+ StringPiece(const char* str, size_t len) : str_(str), len_(len) {}
+
+ bool operator==(const StringPiece& other) const {
+ return len_ == other.len_ && memcmp(str_, other.str_, len_) == 0;
+ }
+ bool operator!=(const StringPiece& other) const {
+ return !(*this == other);
+ }
+
+ /// Convert the slice into a full-fledged std::string, copying the
+ /// data into a new string.
+ string AsString() const {
+ return len_ ? string(str_, len_) : string();
+ }
+
+ const char* str_;
+ size_t len_;
+};
+
+#endif // NINJA_STRINGPIECE_H_
diff --git a/ninja/src/subprocess-posix.cc b/ninja/src/subprocess-posix.cc
new file mode 100644
index 00000000000..b396f84b2b4
--- /dev/null
+++ b/ninja/src/subprocess-posix.cc
@@ -0,0 +1,287 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "subprocess.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/wait.h>
+
+#include "util.h"
+
+Subprocess::Subprocess() : fd_(-1), pid_(-1) {
+}
+Subprocess::~Subprocess() {
+ if (fd_ >= 0)
+ close(fd_);
+ // Reap child if forgotten.
+ if (pid_ != -1)
+ Finish();
+}
+
+bool Subprocess::Start(SubprocessSet* set, const string& command) {
+ int output_pipe[2];
+ if (pipe(output_pipe) < 0)
+ Fatal("pipe: %s", strerror(errno));
+ fd_ = output_pipe[0];
+#if !defined(USE_PPOLL)
+ // On Linux and OpenBSD, we use ppoll in DoWork(); elsewhere we use pselect
+ // and so must avoid overly-large FDs.
+ if (fd_ >= static_cast<int>(FD_SETSIZE))
+ Fatal("pipe: %s", strerror(EMFILE));
+#endif // !USE_PPOLL
+ SetCloseOnExec(fd_);
+
+ pid_ = fork();
+ if (pid_ < 0)
+ Fatal("fork: %s", strerror(errno));
+
+ if (pid_ == 0) {
+ close(output_pipe[0]);
+
+ // Track which fd we use to report errors on.
+ int error_pipe = output_pipe[1];
+ do {
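+ // Put the child in its own process group so SubprocessSet::Clear() can
+ // signal the whole group with kill(-pid, SIGINT).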
+ if (setpgid(0, 0) < 0)
+ break;
+
+ if (sigaction(SIGINT, &set->old_act_, 0) < 0)
+ break;
+ if (sigprocmask(SIG_SETMASK, &set->old_mask_, 0) < 0)
+ break;
+
+ // Open /dev/null over stdin.
+ int devnull = open("/dev/null", O_RDONLY);
+ if (devnull < 0)
+ break;
+ if (dup2(devnull, 0) < 0)
+ break;
+ close(devnull);
+
+ if (dup2(output_pipe[1], 1) < 0 ||
+ dup2(output_pipe[1], 2) < 0)
+ break;
+
+ // Now can use stderr for errors.
+ error_pipe = 2;
+ close(output_pipe[1]);
+
+ execl("/bin/sh", "/bin/sh", "-c", command.c_str(), (char *) NULL);
+ } while (false);
+
+ // If we get here, something went wrong; the execl should have
+ // replaced us.
+ char* err = strerror(errno);
+ if (write(error_pipe, err, strlen(err)) < 0) {
+ // If the write fails, there's nothing we can do.
+ // But this block seems necessary to silence the warning.
+ }
+ _exit(1);
+ }
+
+ close(output_pipe[1]);
+ return true;
+}
+
+void Subprocess::OnPipeReady() {
+ char buf[4 << 10];
+ ssize_t len = read(fd_, buf, sizeof(buf));
+ if (len > 0) {
+ buf_.append(buf, len);
+ } else {
+ if (len < 0)
+ Fatal("read: %s", strerror(errno));
+ close(fd_);
+ fd_ = -1;
+ }
+}
+
+ExitStatus Subprocess::Finish() {
+ assert(pid_ != -1);
+ int status;
+ if (waitpid(pid_, &status, 0) < 0)
+ Fatal("waitpid(%d): %s", pid_, strerror(errno));
+ pid_ = -1;
+
+ if (WIFEXITED(status)) {
+ int exit = WEXITSTATUS(status);
+ if (exit == 0)
+ return ExitSuccess;
+ } else if (WIFSIGNALED(status)) {
+ if (WTERMSIG(status) == SIGINT)
+ return ExitInterrupted;
+ }
+ return ExitFailure;
+}
+
+bool Subprocess::Done() const {
+ return fd_ == -1;
+}
+
+const string& Subprocess::GetOutput() const {
+ return buf_;
+}
+
+bool SubprocessSet::interrupted_;
+
+void SubprocessSet::SetInterruptedFlag(int signum) {
+ (void) signum;
+ interrupted_ = true;
+}
+
+SubprocessSet::SubprocessSet() {
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGINT);
+ if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0)
+ Fatal("sigprocmask: %s", strerror(errno));
+
+ struct sigaction act;
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = SetInterruptedFlag;
+ if (sigaction(SIGINT, &act, &old_act_) < 0)
+ Fatal("sigaction: %s", strerror(errno));
+}
+
+SubprocessSet::~SubprocessSet() {
+ Clear();
+
+ if (sigaction(SIGINT, &old_act_, 0) < 0)
+ Fatal("sigaction: %s", strerror(errno));
+ if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0)
+ Fatal("sigprocmask: %s", strerror(errno));
+}
+
+Subprocess *SubprocessSet::Add(const string& command) {
+ Subprocess *subprocess = new Subprocess;
+ if (!subprocess->Start(this, command)) {
+ delete subprocess;
+ return 0;
+ }
+ running_.push_back(subprocess);
+ return subprocess;
+}
+
+#ifdef USE_PPOLL
+bool SubprocessSet::DoWork() {
+ vector<pollfd> fds;
+ nfds_t nfds = 0;
+
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ++i) {
+ int fd = (*i)->fd_;
+ if (fd < 0)
+ continue;
+ pollfd pfd = { fd, POLLIN | POLLPRI, 0 };
+ fds.push_back(pfd);
+ ++nfds;
+ }
+
+ interrupted_ = false;
+ int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_);
+ if (ret == -1) {
+ if (errno != EINTR) {
+ perror("ninja: ppoll");
+ return false;
+ }
+ return interrupted_;
+ }
+
+ nfds_t cur_nfd = 0;
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ) {
+ int fd = (*i)->fd_;
+ if (fd < 0) {
+ ++i;
+ continue;
+ }
+ assert(fd == fds[cur_nfd].fd);
+ if (fds[cur_nfd++].revents) {
+ (*i)->OnPipeReady();
+ if ((*i)->Done()) {
+ finished_.push(*i);
+ i = running_.erase(i);
+ continue;
+ }
+ }
+ ++i;
+ }
+
+ return interrupted_;
+}
+
+#else // !USE_PPOLL
+bool SubprocessSet::DoWork() {
+ fd_set set;
+ int nfds = 0;
+ FD_ZERO(&set);
+
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ++i) {
+ int fd = (*i)->fd_;
+ if (fd >= 0) {
+ FD_SET(fd, &set);
+ if (nfds < fd+1)
+ nfds = fd+1;
+ }
+ }
+
+ interrupted_ = false;
+ int ret = pselect(nfds, &set, 0, 0, 0, &old_mask_);
+ if (ret == -1) {
+ if (errno != EINTR) {
+ perror("ninja: pselect");
+ return false;
+ }
+ return interrupted_;
+ }
+
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ) {
+ int fd = (*i)->fd_;
+ if (fd >= 0 && FD_ISSET(fd, &set)) {
+ (*i)->OnPipeReady();
+ if ((*i)->Done()) {
+ finished_.push(*i);
+ i = running_.erase(i);
+ continue;
+ }
+ }
+ ++i;
+ }
+
+ return interrupted_;
+}
+#endif // USE_PPOLL
+
+Subprocess* SubprocessSet::NextFinished() {
+ if (finished_.empty())
+ return NULL;
+ Subprocess* subproc = finished_.front();
+ finished_.pop();
+ return subproc;
+}
+
+void SubprocessSet::Clear() {
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ++i)
+ kill(-(*i)->pid_, SIGINT);
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ++i)
+ delete *i;
+ running_.clear();
+}
diff --git a/ninja/src/subprocess-win32.cc b/ninja/src/subprocess-win32.cc
new file mode 100644
index 00000000000..1b230b640b1
--- /dev/null
+++ b/ninja/src/subprocess-win32.cc
@@ -0,0 +1,280 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "subprocess.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+
+#include "util.h"
+
+Subprocess::Subprocess() : child_(NULL) , overlapped_(), is_reading_(false) {
+}
+
+Subprocess::~Subprocess() {
+ if (pipe_) {
+ if (!CloseHandle(pipe_))
+ Win32Fatal("CloseHandle");
+ }
+ // Reap child if forgotten.
+ if (child_)
+ Finish();
+}
+
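+// Anonymous pipes do not support overlapped I/O, so use a uniquely named
+// pipe and register its read end with the set's I/O completion port.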
+HANDLE Subprocess::SetupPipe(HANDLE ioport) {
+ char pipe_name[100];
+ snprintf(pipe_name, sizeof(pipe_name),
+ "\\\\.\\pipe\\ninja_pid%lu_sp%p", GetCurrentProcessId(), this);
+
+ pipe_ = ::CreateNamedPipeA(pipe_name,
+ PIPE_ACCESS_INBOUND | FILE_FLAG_OVERLAPPED,
+ PIPE_TYPE_BYTE,
+ PIPE_UNLIMITED_INSTANCES,
+ 0, 0, INFINITE, NULL);
+ if (pipe_ == INVALID_HANDLE_VALUE)
+ Win32Fatal("CreateNamedPipe");
+
+ if (!CreateIoCompletionPort(pipe_, ioport, (ULONG_PTR)this, 0))
+ Win32Fatal("CreateIoCompletionPort");
+
+ memset(&overlapped_, 0, sizeof(overlapped_));
+ if (!ConnectNamedPipe(pipe_, &overlapped_) &&
+ GetLastError() != ERROR_IO_PENDING) {
+ Win32Fatal("ConnectNamedPipe");
+ }
+
+ // Get the write end of the pipe as a handle inheritable across processes.
+ HANDLE output_write_handle = CreateFile(pipe_name, GENERIC_WRITE, 0,
+ NULL, OPEN_EXISTING, 0, NULL);
+ HANDLE output_write_child;
+ if (!DuplicateHandle(GetCurrentProcess(), output_write_handle,
+ GetCurrentProcess(), &output_write_child,
+ 0, TRUE, DUPLICATE_SAME_ACCESS)) {
+ Win32Fatal("DuplicateHandle");
+ }
+ CloseHandle(output_write_handle);
+
+ return output_write_child;
+}
+
+bool Subprocess::Start(SubprocessSet* set, const string& command) {
+ HANDLE child_pipe = SetupPipe(set->ioport_);
+
+ SECURITY_ATTRIBUTES security_attributes;
+ memset(&security_attributes, 0, sizeof(SECURITY_ATTRIBUTES));
+ security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+ security_attributes.bInheritHandle = TRUE;
+ // Must be inheritable so subprocesses can dup to children.
+ HANDLE nul = CreateFile("NUL", GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ &security_attributes, OPEN_EXISTING, 0, NULL);
+ if (nul == INVALID_HANDLE_VALUE)
+ Fatal("couldn't open nul");
+
+ STARTUPINFOA startup_info;
+ memset(&startup_info, 0, sizeof(startup_info));
+ startup_info.cb = sizeof(STARTUPINFO);
+ startup_info.dwFlags = STARTF_USESTDHANDLES;
+ startup_info.hStdInput = nul;
+ startup_info.hStdOutput = child_pipe;
+ startup_info.hStdError = child_pipe;
+
+ PROCESS_INFORMATION process_info;
+ memset(&process_info, 0, sizeof(process_info));
+
+ // Do not prepend 'cmd /c' on Windows; doing so breaks command
+ // lines longer than 8,191 chars.
+ if (!CreateProcessA(NULL, (char*)command.c_str(), NULL, NULL,
+ /* inherit handles */ TRUE, CREATE_NEW_PROCESS_GROUP,
+ NULL, NULL,
+ &startup_info, &process_info)) {
+ DWORD error = GetLastError();
+ if (error == ERROR_FILE_NOT_FOUND) {
+ // File (program) not found error is treated as a normal build
+ // action failure.
+ if (child_pipe)
+ CloseHandle(child_pipe);
+ CloseHandle(pipe_);
+ CloseHandle(nul);
+ pipe_ = NULL;
+ // child_ is already NULL;
+ buf_ = "CreateProcess failed: The system cannot find the file "
+ "specified.\n";
+ return true;
+ } else {
+ Win32Fatal("CreateProcess"); // pass all other errors to Win32Fatal
+ }
+ }
+
+ // Close pipe channel only used by the child.
+ if (child_pipe)
+ CloseHandle(child_pipe);
+ CloseHandle(nul);
+
+ CloseHandle(process_info.hThread);
+ child_ = process_info.hProcess;
+
+ return true;
+}
+
+void Subprocess::OnPipeReady() {
+ DWORD bytes;
+ if (!GetOverlappedResult(pipe_, &overlapped_, &bytes, TRUE)) {
+ if (GetLastError() == ERROR_BROKEN_PIPE) {
+ CloseHandle(pipe_);
+ pipe_ = NULL;
+ return;
+ }
+ Win32Fatal("GetOverlappedResult");
+ }
+
+ if (is_reading_ && bytes)
+ buf_.append(overlapped_buf_, bytes);
+
+ memset(&overlapped_, 0, sizeof(overlapped_));
+ is_reading_ = true;
+ if (!::ReadFile(pipe_, overlapped_buf_, sizeof(overlapped_buf_),
+ &bytes, &overlapped_)) {
+ if (GetLastError() == ERROR_BROKEN_PIPE) {
+ CloseHandle(pipe_);
+ pipe_ = NULL;
+ return;
+ }
+ if (GetLastError() != ERROR_IO_PENDING)
+ Win32Fatal("ReadFile");
+ }
+
+ // Even if the ReadFile call above read some bytes, we'll enter this
+ // function again later and pick them up at that point.
+}
+
+ExitStatus Subprocess::Finish() {
+ if (!child_)
+ return ExitFailure;
+
+ // TODO: add error handling for all of these.
+ WaitForSingleObject(child_, INFINITE);
+
+ DWORD exit_code = 0;
+ GetExitCodeProcess(child_, &exit_code);
+
+ CloseHandle(child_);
+ child_ = NULL;
+
+ return exit_code == 0 ? ExitSuccess :
+ exit_code == CONTROL_C_EXIT ? ExitInterrupted :
+ ExitFailure;
+}
+
+bool Subprocess::Done() const {
+ return pipe_ == NULL;
+}
+
+const string& Subprocess::GetOutput() const {
+ return buf_;
+}
+
+HANDLE SubprocessSet::ioport_;
+
+SubprocessSet::SubprocessSet() {
+ ioport_ = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
+ if (!ioport_)
+ Win32Fatal("CreateIoCompletionPort");
+ if (!SetConsoleCtrlHandler(NotifyInterrupted, TRUE))
+ Win32Fatal("SetConsoleCtrlHandler");
+}
+
+SubprocessSet::~SubprocessSet() {
+ Clear();
+
+ SetConsoleCtrlHandler(NotifyInterrupted, FALSE);
+ CloseHandle(ioport_);
+}
+
+BOOL WINAPI SubprocessSet::NotifyInterrupted(DWORD dwCtrlType) {
+ if (dwCtrlType == CTRL_C_EVENT || dwCtrlType == CTRL_BREAK_EVENT) {
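+ // Wake DoWork() by posting a completion packet with a NULL key; DoWork()
+ // treats a NULL subprocess pointer as an interrupt.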
+ if (!PostQueuedCompletionStatus(ioport_, 0, 0, NULL))
+ Win32Fatal("PostQueuedCompletionStatus");
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+Subprocess *SubprocessSet::Add(const string& command) {
+ Subprocess *subprocess = new Subprocess;
+ if (!subprocess->Start(this, command)) {
+ delete subprocess;
+ return 0;
+ }
+ if (subprocess->child_)
+ running_.push_back(subprocess);
+ else
+ finished_.push(subprocess);
+ return subprocess;
+}
+
+bool SubprocessSet::DoWork() {
+ DWORD bytes_read;
+ Subprocess* subproc;
+ OVERLAPPED* overlapped;
+
+ if (!GetQueuedCompletionStatus(ioport_, &bytes_read, (PULONG_PTR)&subproc,
+ &overlapped, INFINITE)) {
+ if (GetLastError() != ERROR_BROKEN_PIPE)
+ Win32Fatal("GetQueuedCompletionStatus");
+ }
+
+ if (!subproc) // A NULL subproc indicates that we were interrupted and is
+ // delivered by NotifyInterrupted above.
+ return true;
+
+ subproc->OnPipeReady();
+
+ if (subproc->Done()) {
+ vector<Subprocess*>::iterator end =
+ std::remove(running_.begin(), running_.end(), subproc);
+ if (running_.end() != end) {
+ finished_.push(subproc);
+ running_.resize(end - running_.begin());
+ }
+ }
+
+ return false;
+}
+
+Subprocess* SubprocessSet::NextFinished() {
+ if (finished_.empty())
+ return NULL;
+ Subprocess* subproc = finished_.front();
+ finished_.pop();
+ return subproc;
+}
+
+void SubprocessSet::Clear() {
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ++i) {
+ if ((*i)->child_) {
+ if (!GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT,
+ GetProcessId((*i)->child_))) {
+ Win32Fatal("GenerateConsoleCtrlEvent");
+ }
+ }
+ }
+ for (vector<Subprocess*>::iterator i = running_.begin();
+ i != running_.end(); ++i)
+ delete *i;
+ running_.clear();
+}
diff --git a/ninja/src/subprocess.h b/ninja/src/subprocess.h
new file mode 100644
index 00000000000..4c1629c89b3
--- /dev/null
+++ b/ninja/src/subprocess.h
@@ -0,0 +1,98 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_SUBPROCESS_H_
+#define NINJA_SUBPROCESS_H_
+
+#include <string>
+#include <vector>
+#include <queue>
+using namespace std;
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <signal.h>
+#endif
+
+#include "exit_status.h"
+
+/// Subprocess wraps a single async subprocess.  It is entirely
+/// passive: it expects the caller to notify it when its fds are ready
+/// for reading, as well as to call Finish() to reap the child once Done()
+/// is true.
+struct Subprocess {
+ ~Subprocess();
+
+ /// Returns ExitSuccess on successful process exit, ExitInterrupted if
+ /// the process was interrupted, ExitFailure if it otherwise failed.
+ ExitStatus Finish();
+
+ bool Done() const;
+
+ const string& GetOutput() const;
+
+ private:
+ Subprocess();
+ bool Start(struct SubprocessSet* set, const string& command);
+ void OnPipeReady();
+
+ string buf_;
+
+#ifdef _WIN32
+ /// Set up pipe_ as the parent-side pipe of the subprocess; return the
+ /// other end of the pipe, usable in the child process.
+ HANDLE SetupPipe(HANDLE ioport);
+
+ HANDLE child_;
+ HANDLE pipe_;
+ OVERLAPPED overlapped_;
+ char overlapped_buf_[4 << 10];
+ bool is_reading_;
+#else
+ int fd_;
+ pid_t pid_;
+#endif
+
+ friend struct SubprocessSet;
+};
+
+/// SubprocessSet runs a ppoll/pselect() (or IO completion port) loop around
+/// a set of Subprocesses.  DoWork() waits for any state change in
+/// subprocesses; finished_ is a queue of subprocesses as they finish.
+struct SubprocessSet {
+ SubprocessSet();
+ ~SubprocessSet();
+
+ Subprocess* Add(const string& command);
+ bool DoWork();
+ Subprocess* NextFinished();
+ void Clear();
+
+ vector<Subprocess*> running_;
+ queue<Subprocess*> finished_;
+
+#ifdef _WIN32
+ static BOOL WINAPI NotifyInterrupted(DWORD dwCtrlType);
+ static HANDLE ioport_;
+#else
+ static void SetInterruptedFlag(int signum);
+ static bool interrupted_;
+
+ struct sigaction old_act_;
+ sigset_t old_mask_;
+#endif
+};
+
+#endif // NINJA_SUBPROCESS_H_
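
A minimal caller-side sketch of the interface declared above, assuming a single command string; the function name RunOne and its return codes are illustrative only:

#include "subprocess.h"

#include <stdio.h>

// Sketch: run one command to completion and print its combined output.
int RunOne(const string& command) {
  SubprocessSet subprocs;
  Subprocess* subproc = subprocs.Add(command);
  if (!subproc)
    return 1;  // Start() failed.

  // DoWork() blocks until some subprocess changes state; it returns true
  // when the set was interrupted (e.g. Ctrl-C).
  while (!subproc->Done()) {
    if (subprocs.DoWork())
      return 2;
  }

  subprocs.NextFinished();  // Pop it from the finished_ queue.
  ExitStatus status = subproc->Finish();
  fputs(subproc->GetOutput().c_str(), stdout);
  delete subproc;
  return status == ExitSuccess ? 0 : 1;
}
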
diff --git a/ninja/src/subprocess_test.cc b/ninja/src/subprocess_test.cc
new file mode 100644
index 00000000000..afd90089e5f
--- /dev/null
+++ b/ninja/src/subprocess_test.cc
@@ -0,0 +1,197 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "subprocess.h"
+
+#include "test.h"
+
+#ifndef _WIN32
+// SetWithLots needs setrlimit.
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <unistd.h>
+#endif
+
+namespace {
+
+#ifdef _WIN32
+const char* kSimpleCommand = "cmd /c dir \\";
+#else
+const char* kSimpleCommand = "ls /";
+#endif
+
+struct SubprocessTest : public testing::Test {
+ SubprocessSet subprocs_;
+};
+
+} // anonymous namespace
+
+// Run a command that fails and emits to stderr.
+TEST_F(SubprocessTest, BadCommandStderr) {
+ Subprocess* subproc = subprocs_.Add("cmd /c ninja_no_such_command");
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ // Pretend we discovered that stderr was ready for writing.
+ subprocs_.DoWork();
+ }
+
+ EXPECT_EQ(ExitFailure, subproc->Finish());
+ EXPECT_NE("", subproc->GetOutput());
+}
+
+// Run a command that does not exist.
+TEST_F(SubprocessTest, NoSuchCommand) {
+ Subprocess* subproc = subprocs_.Add("ninja_no_such_command");
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ // Pretend we discovered that stderr was ready for writing.
+ subprocs_.DoWork();
+ }
+
+ EXPECT_EQ(ExitFailure, subproc->Finish());
+ EXPECT_NE("", subproc->GetOutput());
+#ifdef _WIN32
+ ASSERT_EQ("CreateProcess failed: The system cannot find the file "
+ "specified.\n", subproc->GetOutput());
+#endif
+}
+
+#ifndef _WIN32
+
+TEST_F(SubprocessTest, InterruptChild) {
+ Subprocess* subproc = subprocs_.Add("kill -INT $$");
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ subprocs_.DoWork();
+ }
+
+ EXPECT_EQ(ExitInterrupted, subproc->Finish());
+}
+
+TEST_F(SubprocessTest, InterruptParent) {
+ Subprocess* subproc = subprocs_.Add("kill -INT $PPID ; sleep 1");
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ bool interrupted = subprocs_.DoWork();
+ if (interrupted)
+ return;
+ }
+
+ ADD_FAILURE() << "We should have been interrupted";
+}
+
+#endif
+
+TEST_F(SubprocessTest, SetWithSingle) {
+ Subprocess* subproc = subprocs_.Add(kSimpleCommand);
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ subprocs_.DoWork();
+ }
+ ASSERT_EQ(ExitSuccess, subproc->Finish());
+ ASSERT_NE("", subproc->GetOutput());
+
+ ASSERT_EQ(1u, subprocs_.finished_.size());
+}
+
+TEST_F(SubprocessTest, SetWithMulti) {
+ Subprocess* processes[3];
+ const char* kCommands[3] = {
+ kSimpleCommand,
+#ifdef _WIN32
+ "cmd /c echo hi",
+ "cmd /c time /t",
+#else
+ "whoami",
+ "pwd",
+#endif
+ };
+
+ for (int i = 0; i < 3; ++i) {
+ processes[i] = subprocs_.Add(kCommands[i]);
+ ASSERT_NE((Subprocess *) 0, processes[i]);
+ }
+
+ ASSERT_EQ(3u, subprocs_.running_.size());
+ for (int i = 0; i < 3; ++i) {
+ ASSERT_FALSE(processes[i]->Done());
+ ASSERT_EQ("", processes[i]->GetOutput());
+ }
+
+ while (!processes[0]->Done() || !processes[1]->Done() ||
+ !processes[2]->Done()) {
+ ASSERT_GT(subprocs_.running_.size(), 0u);
+ subprocs_.DoWork();
+ }
+
+ ASSERT_EQ(0u, subprocs_.running_.size());
+ ASSERT_EQ(3u, subprocs_.finished_.size());
+
+ for (int i = 0; i < 3; ++i) {
+ ASSERT_EQ(ExitSuccess, processes[i]->Finish());
+ ASSERT_NE("", processes[i]->GetOutput());
+ delete processes[i];
+ }
+}
+
+// OS X's process limit is less than 1025 by default
+// (|sysctl kern.maxprocperuid| is 709 on 10.7 and 10.8 and less prior to that).
+#if defined(linux) || defined(__OpenBSD__)
+TEST_F(SubprocessTest, SetWithLots) {
+ // Arbitrary big number; needs to be over 1024 to confirm we're no longer
+ // hostage to pselect.
+ const size_t kNumProcs = 1025;
+
+ // Make sure [ulimit -n] isn't going to stop us from working.
+ rlimit rlim;
+ ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlim));
+ ASSERT_GT(rlim.rlim_cur, kNumProcs)
+ << "Raise [ulimit -n] well above " << kNumProcs
+ << " to make this test go";
+
+ vector<Subprocess*> procs;
+ for (size_t i = 0; i < kNumProcs; ++i) {
+ Subprocess* subproc = subprocs_.Add("/bin/echo");
+ ASSERT_NE((Subprocess *) 0, subproc);
+ procs.push_back(subproc);
+ }
+ while (!subprocs_.running_.empty())
+ subprocs_.DoWork();
+ for (size_t i = 0; i < procs.size(); ++i) {
+ ASSERT_EQ(ExitSuccess, procs[i]->Finish());
+ ASSERT_NE("", procs[i]->GetOutput());
+ }
+ ASSERT_EQ(kNumProcs, subprocs_.finished_.size());
+}
+#endif // linux || __OpenBSD__
+
+// TODO: this test could also work on Windows; it's just not obvious how to
+// read stdin simply there.
+#ifndef _WIN32
+// Verify that a command that attempts to read stdin correctly sees
+// that stdin is closed.
+TEST_F(SubprocessTest, ReadStdin) {
+ Subprocess* subproc = subprocs_.Add("cat -");
+ while (!subproc->Done()) {
+ subprocs_.DoWork();
+ }
+ ASSERT_EQ(ExitSuccess, subproc->Finish());
+ ASSERT_EQ(1u, subprocs_.finished_.size());
+}
+#endif // _WIN32
diff --git a/ninja/src/test.cc b/ninja/src/test.cc
new file mode 100644
index 00000000000..45a92268508
--- /dev/null
+++ b/ninja/src/test.cc
@@ -0,0 +1,186 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "test.h"
+
+#include <algorithm>
+
+#include <errno.h>
+
+#include "build_log.h"
+#include "manifest_parser.h"
+#include "util.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+namespace {
+
+#ifdef _WIN32
+#ifndef _mktemp_s
+/// mingw has no _mktemp_s.  Provide a minimal replacement sufficient for
+/// the use below.
+int _mktemp_s(char* templ) {
+ char* ofs = strchr(templ, 'X');
+ sprintf(ofs, "%d", rand() % 1000000);
+ return 0;
+}
+#endif
+
+/// Windows has no mkdtemp. Implement it in terms of _mktemp_s.
+char* mkdtemp(char* name_template) {
+ int err = _mktemp_s(name_template);
+ if (err < 0) {
+ perror("_mktemp_s");
+ return NULL;
+ }
+
+ err = _mkdir(name_template);
+ if (err < 0) {
+ perror("mkdir");
+ return NULL;
+ }
+
+ return name_template;
+}
+#endif // _WIN32
+
+string GetSystemTempDir() {
+#ifdef _WIN32
+ char buf[1024];
+ if (!GetTempPath(sizeof(buf), buf))
+ return "";
+ return buf;
+#else
+ const char* tempdir = getenv("TMPDIR");
+ if (tempdir)
+ return tempdir;
+ return "/tmp";
+#endif
+}
+
+} // anonymous namespace
+
+StateTestWithBuiltinRules::StateTestWithBuiltinRules() {
+ AddCatRule(&state_);
+}
+
+void StateTestWithBuiltinRules::AddCatRule(State* state) {
+ AssertParse(state,
+"rule cat\n"
+" command = cat $in > $out\n");
+}
+
+Node* StateTestWithBuiltinRules::GetNode(const string& path) {
+ return state_.GetNode(path);
+}
+
+void AssertParse(State* state, const char* input) {
+ ManifestParser parser(state, NULL);
+ string err;
+ ASSERT_TRUE(parser.ParseTest(input, &err)) << err;
+ ASSERT_EQ("", err);
+}
+
+void AssertHash(const char* expected, uint64_t actual) {
+ ASSERT_EQ(BuildLog::LogEntry::HashCommand(expected), actual);
+}
+
+void VirtualFileSystem::Create(const string& path,
+ const string& contents) {
+ files_[path].mtime = now_;
+ files_[path].contents = contents;
+ files_created_.insert(path);
+}
+
+TimeStamp VirtualFileSystem::Stat(const string& path) {
+ FileMap::iterator i = files_.find(path);
+ if (i != files_.end())
+ return i->second.mtime;
+ return 0;
+}
+
+bool VirtualFileSystem::WriteFile(const string& path, const string& contents) {
+ Create(path, contents);
+ return true;
+}
+
+bool VirtualFileSystem::MakeDir(const string& path) {
+ directories_made_.push_back(path);
+ return true; // success
+}
+
+string VirtualFileSystem::ReadFile(const string& path, string* err) {
+ files_read_.push_back(path);
+ FileMap::iterator i = files_.find(path);
+ if (i != files_.end())
+ return i->second.contents;
+ return "";
+}
+
+int VirtualFileSystem::RemoveFile(const string& path) {
+ if (find(directories_made_.begin(), directories_made_.end(), path)
+ != directories_made_.end())
+ return -1;
+ FileMap::iterator i = files_.find(path);
+ if (i != files_.end()) {
+ files_.erase(i);
+ files_removed_.insert(path);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+void ScopedTempDir::CreateAndEnter(const string& name) {
+ // First change into the system temp dir and save it for cleanup.
+ start_dir_ = GetSystemTempDir();
+ if (start_dir_.empty())
+ Fatal("couldn't get system temp dir");
+ if (chdir(start_dir_.c_str()) < 0)
+ Fatal("chdir: %s", strerror(errno));
+
+ // Create a temporary subdirectory of that.
+ char name_template[1024];
+ strcpy(name_template, name.c_str());
+ strcat(name_template, "-XXXXXX");
+ char* tempname = mkdtemp(name_template);
+ if (!tempname)
+ Fatal("mkdtemp: %s", strerror(errno));
+ temp_dir_name_ = tempname;
+
+ // chdir into the new temporary directory.
+ if (chdir(temp_dir_name_.c_str()) < 0)
+ Fatal("chdir: %s", strerror(errno));
+}
+
+void ScopedTempDir::Cleanup() {
+ if (temp_dir_name_.empty())
+ return; // Something went wrong earlier.
+
+ // Move out of the directory we're about to clobber.
+ if (chdir(start_dir_.c_str()) < 0)
+ Fatal("chdir: %s", strerror(errno));
+
+#ifdef _WIN32
+ string command = "rmdir /s /q " + temp_dir_name_;
+#else
+ string command = "rm -rf " + temp_dir_name_;
+#endif
+ if (system(command.c_str()) < 0)
+ Fatal("system: %s", strerror(errno));
+
+ temp_dir_name_.clear();
+}
diff --git a/ninja/src/test.h b/ninja/src/test.h
new file mode 100644
index 00000000000..9f29e07473d
--- /dev/null
+++ b/ninja/src/test.h
@@ -0,0 +1,98 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_TEST_H_
+#define NINJA_TEST_H_
+
+#include <gtest/gtest.h>
+
+#include "disk_interface.h"
+#include "state.h"
+#include "util.h"
+
+// Support utilities for tests.
+
+struct Node;
+
+/// A base test fixture that includes a State object with a
+/// builtin "cat" rule.
+struct StateTestWithBuiltinRules : public testing::Test {
+ StateTestWithBuiltinRules();
+
+  /// Add a "cat" rule to \a state.  Used by some tests; the ctor
+  /// already does this for state_.
+ void AddCatRule(State* state);
+
+ /// Short way to get a Node by its path from state_.
+ Node* GetNode(const string& path);
+
+ State state_;
+};
+
+void AssertParse(State* state, const char* input);
+void AssertHash(const char* expected, uint64_t actual);
+
+/// An implementation of DiskInterface that uses an in-memory representation
+/// of disk state. It also logs file accesses and directory creations
+/// so it can be used by tests to verify disk access patterns.
+struct VirtualFileSystem : public DiskInterface {
+ VirtualFileSystem() : now_(1) {}
+
+ /// "Create" a file with contents.
+ void Create(const string& path, const string& contents);
+
+ /// Tick "time" forwards; subsequent file operations will be newer than
+ /// previous ones.
+ int Tick() {
+ return ++now_;
+ }
+
+ // DiskInterface
+ virtual TimeStamp Stat(const string& path);
+ virtual bool WriteFile(const string& path, const string& contents);
+ virtual bool MakeDir(const string& path);
+ virtual string ReadFile(const string& path, string* err);
+ virtual int RemoveFile(const string& path);
+
+ /// An entry for a single in-memory file.
+ struct Entry {
+ int mtime;
+ string contents;
+ };
+
+ vector<string> directories_made_;
+ vector<string> files_read_;
+ typedef map<string, Entry> FileMap;
+ FileMap files_;
+ set<string> files_removed_;
+ set<string> files_created_;
+
+ /// A simple fake timestamp for file operations.
+ int now_;
+};
+
+struct ScopedTempDir {
+ /// Create a temporary directory and chdir into it.
+ void CreateAndEnter(const string& name);
+
+ /// Clean up the temporary directory.
+ void Cleanup();
+
+ /// The temp directory containing our dir.
+ string start_dir_;
+ /// The subdirectory name for our dir, or empty if it hasn't been set up.
+ string temp_dir_name_;
+};
+
+#endif // NINJA_TEST_H_
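
For orientation, a hedged sketch of a test against the in-memory file system above; the test name and file names are illustrative:

#include "test.h"

TEST(VirtualFileSystemExample, TickAdvancesMtime) {
  VirtualFileSystem fs;
  fs.Create("out.o", "object file");
  TimeStamp t1 = fs.Stat("out.o");

  fs.Tick();                      // Advance the fake clock.
  fs.Create("out.o", "rebuilt");  // "Rewrite" the file at the newer time.
  TimeStamp t2 = fs.Stat("out.o");

  EXPECT_GT(t2, t1);
  EXPECT_EQ(0, fs.Stat("missing.h"));  // Unknown paths stat as 0.

  string err;
  fs.ReadFile("out.o", &err);
  EXPECT_EQ(1u, fs.files_read_.size());  // Accesses are logged for checking.
}
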
diff --git a/ninja/src/timestamp.h b/ninja/src/timestamp.h
new file mode 100644
index 00000000000..cee7ba8f21b
--- /dev/null
+++ b/ninja/src/timestamp.h
@@ -0,0 +1,24 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_TIMESTAMP_H_
+#define NINJA_TIMESTAMP_H_
+
+// When considering file modification times we only need to compare
+// them against one another -- we never convert them to an absolute
+// real time.  On POSIX we use time_t (seconds since epoch) and on
+// Windows we use a different value.  Both fit in an int.
+typedef int TimeStamp;
+
+#endif // NINJA_TIMESTAMP_H_
diff --git a/ninja/src/util.cc b/ninja/src/util.cc
new file mode 100644
index 00000000000..b9c2c0d59b9
--- /dev/null
+++ b/ninja/src/util.cc
@@ -0,0 +1,379 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "util.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#include <io.h>
+#include <share.h>
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+
+#include <vector>
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#elif defined(__SVR4) && defined(__sun)
+#include <unistd.h>
+#include <sys/loadavg.h>
+#elif defined(linux) || defined(__GLIBC__)
+#include <sys/sysinfo.h>
+#endif
+
+#include "edit_distance.h"
+#include "metrics.h"
+
+void Fatal(const char* msg, ...) {
+ va_list ap;
+ fprintf(stderr, "ninja: fatal: ");
+ va_start(ap, msg);
+ vfprintf(stderr, msg, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+#ifdef _WIN32
+ // On Windows, some tools may inject extra threads.
+ // exit() may block on locks held by those threads, so forcibly exit.
+ fflush(stderr);
+ fflush(stdout);
+ ExitProcess(1);
+#else
+ exit(1);
+#endif
+}
+
+void Warning(const char* msg, ...) {
+ va_list ap;
+ fprintf(stderr, "ninja: warning: ");
+ va_start(ap, msg);
+ vfprintf(stderr, msg, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+void Error(const char* msg, ...) {
+ va_list ap;
+ fprintf(stderr, "ninja: error: ");
+ va_start(ap, msg);
+ vfprintf(stderr, msg, ap);
+ va_end(ap);
+ fprintf(stderr, "\n");
+}
+
+bool CanonicalizePath(string* path, string* err) {
+ METRIC_RECORD("canonicalize str");
+ size_t len = path->size();
+ char* str = 0;
+ if (len > 0)
+ str = &(*path)[0];
+ if (!CanonicalizePath(str, &len, err))
+ return false;
+ path->resize(len);
+ return true;
+}
+
+bool CanonicalizePath(char* path, size_t* len, string* err) {
+ // WARNING: this function is performance-critical; please benchmark
+ // any changes you make to it.
+ METRIC_RECORD("canonicalize path");
+ if (*len == 0) {
+ *err = "empty path";
+ return false;
+ }
+
+ const int kMaxPathComponents = 30;
+ char* components[kMaxPathComponents];
+ int component_count = 0;
+
+ char* start = path;
+ char* dst = start;
+ const char* src = start;
+ const char* end = start + *len;
+
+ if (*src == '/') {
+#ifdef _WIN32
+ // network path starts with //
+ if (*len > 1 && *(src + 1) == '/') {
+ src += 2;
+ dst += 2;
+ } else {
+ ++src;
+ ++dst;
+ }
+#else
+ ++src;
+ ++dst;
+#endif
+ }
+
+ while (src < end) {
+ if (*src == '.') {
+ if (src + 1 == end || src[1] == '/') {
+ // '.' component; eliminate.
+ src += 2;
+ continue;
+ } else if (src[1] == '.' && (src + 2 == end || src[2] == '/')) {
+ // '..' component. Back up if possible.
+ if (component_count > 0) {
+ dst = components[component_count - 1];
+ src += 3;
+ --component_count;
+ } else {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ }
+ continue;
+ }
+ }
+
+ if (*src == '/') {
+ src++;
+ continue;
+ }
+
+ if (component_count == kMaxPathComponents)
+      Fatal("path has too many components: %s", path);
+ components[component_count] = dst;
+ ++component_count;
+
+ while (*src != '/' && src != end)
+ *dst++ = *src++;
+ *dst++ = *src++; // Copy '/' or final \0 character as well.
+ }
+
+ if (dst == start) {
+ *err = "path canonicalizes to the empty path";
+ return false;
+ }
+
+ *len = dst - start - 1;
+ return true;
+}
+
+int ReadFile(const string& path, string* contents, string* err) {
+ FILE* f = fopen(path.c_str(), "r");
+ if (!f) {
+ err->assign(strerror(errno));
+ return -errno;
+ }
+
+ char buf[64 << 10];
+ size_t len;
+ while ((len = fread(buf, 1, sizeof(buf), f)) > 0) {
+ contents->append(buf, len);
+ }
+ if (ferror(f)) {
+ err->assign(strerror(errno)); // XXX errno?
+ contents->clear();
+ fclose(f);
+ return -errno;
+ }
+ fclose(f);
+ return 0;
+}
+
+void SetCloseOnExec(int fd) {
+#ifndef _WIN32
+ int flags = fcntl(fd, F_GETFD);
+ if (flags < 0) {
+ perror("fcntl(F_GETFD)");
+ } else {
+ if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
+ perror("fcntl(F_SETFD)");
+ }
+#else
+ HANDLE hd = (HANDLE) _get_osfhandle(fd);
+ if (! SetHandleInformation(hd, HANDLE_FLAG_INHERIT, 0)) {
+ fprintf(stderr, "SetHandleInformation(): %s", GetLastErrorString().c_str());
+ }
+#endif // ! _WIN32
+}
+
+
+const char* SpellcheckStringV(const string& text,
+ const vector<const char*>& words) {
+ const bool kAllowReplacements = true;
+ const int kMaxValidEditDistance = 3;
+
+ int min_distance = kMaxValidEditDistance + 1;
+ const char* result = NULL;
+ for (vector<const char*>::const_iterator i = words.begin();
+ i != words.end(); ++i) {
+ int distance = EditDistance(*i, text, kAllowReplacements,
+ kMaxValidEditDistance);
+ if (distance < min_distance) {
+ min_distance = distance;
+ result = *i;
+ }
+ }
+ return result;
+}
+
+const char* SpellcheckString(const char* text, ...) {
+ // Note: This takes a const char* instead of a string& because using
+ // va_start() with a reference parameter is undefined behavior.
+ va_list ap;
+ va_start(ap, text);
+ vector<const char*> words;
+ const char* word;
+ while ((word = va_arg(ap, const char*)))
+ words.push_back(word);
+ va_end(ap);
+ return SpellcheckStringV(text, words);
+}
+
+#ifdef _WIN32
+string GetLastErrorString() {
+ DWORD err = GetLastError();
+
+ char* msg_buf;
+ FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ err,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (char*)&msg_buf,
+ 0,
+ NULL);
+ string msg = msg_buf;
+ LocalFree(msg_buf);
+ return msg;
+}
+
+void Win32Fatal(const char* function) {
+ Fatal("%s: %s", function, GetLastErrorString().c_str());
+}
+#endif
+
+static bool islatinalpha(int c) {
+ // isalpha() is locale-dependent.
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+string StripAnsiEscapeCodes(const string& in) {
+ string stripped;
+ stripped.reserve(in.size());
+
+ for (size_t i = 0; i < in.size(); ++i) {
+ if (in[i] != '\33') {
+ // Not an escape code.
+ stripped.push_back(in[i]);
+ continue;
+ }
+
+ // Only strip CSIs for now.
+ if (i + 1 >= in.size()) break;
+ if (in[i + 1] != '[') continue; // Not a CSI.
+ i += 2;
+
+ // Skip everything up to and including the next [a-zA-Z].
+ while (i < in.size() && !islatinalpha(in[i]))
+ ++i;
+ }
+ return stripped;
+}
+
+#if defined(linux) || defined(__GLIBC__)
+int GetProcessorCount() {
+ return get_nprocs();
+}
+#elif defined(__APPLE__) || defined(__FreeBSD__)
+int GetProcessorCount() {
+ int processors;
+ size_t processors_size = sizeof(processors);
+ int name[] = {CTL_HW, HW_NCPU};
+ if (sysctl(name, sizeof(name) / sizeof(int),
+ &processors, &processors_size,
+ NULL, 0) < 0) {
+ return 0;
+ }
+ return processors;
+}
+#elif defined(_WIN32)
+int GetProcessorCount() {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+}
+#else
+// This is what get_nprocs() should be doing in the Linux implementation
+// above, but in a more standard way.
+int GetProcessorCount() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+#endif
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+double GetLoadAverage() {
+ // TODO(nicolas.despres@gmail.com): Find a way to implement it on Windows.
+ // Remember to also update Usage() when this is fixed.
+ return -0.0f;
+}
+#else
+double GetLoadAverage() {
+ double loadavg[3] = { 0.0f, 0.0f, 0.0f };
+ if (getloadavg(loadavg, 3) < 0) {
+ // Maybe we should return an error here or the availability of
+ // getloadavg(3) should be checked when ninja is configured.
+ return -0.0f;
+ }
+ return loadavg[0];
+}
+#endif // _WIN32
+
+string ElideMiddle(const string& str, size_t width) {
+ const int kMargin = 3; // Space for "...".
+ string result = str;
+ if (result.size() + kMargin > width) {
+ size_t elide_size = (width - kMargin) / 2;
+ result = result.substr(0, elide_size)
+ + "..."
+ + result.substr(result.size() - elide_size, elide_size);
+ }
+ return result;
+}
+
+bool Truncate(const string& path, size_t size, string* err) {
+#ifdef _WIN32
+ int fh = _sopen(path.c_str(), _O_RDWR | _O_CREAT, _SH_DENYNO,
+ _S_IREAD | _S_IWRITE);
+ int success = _chsize(fh, size);
+ _close(fh);
+#else
+ int success = truncate(path.c_str(), size);
+#endif
+  // Both truncate() and _chsize() return 0 on success; on failure they set
+  // errno and return -1.
+ if (success < 0) {
+ *err = strerror(errno);
+ return false;
+ }
+ return true;
+}
diff --git a/ninja/src/util.h b/ninja/src/util.h
new file mode 100644
index 00000000000..6788410d2ee
--- /dev/null
+++ b/ninja/src/util.h
@@ -0,0 +1,100 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_UTIL_H_
+#define NINJA_UTIL_H_
+
+#ifdef _WIN32
+#include "win32port.h"
+#else
+#include <stdint.h>
+#endif
+
+#include <string>
+#include <vector>
+using namespace std;
+
+#ifdef _MSC_VER
+#define NORETURN __declspec(noreturn)
+#else
+#define NORETURN __attribute__((noreturn))
+#endif
+
+/// Log a fatal message and exit.
+NORETURN void Fatal(const char* msg, ...);
+
+/// Log a warning message.
+void Warning(const char* msg, ...);
+
+/// Log an error message.
+void Error(const char* msg, ...);
+
+/// Canonicalize a path like "foo/../bar.h" into just "bar.h".
+bool CanonicalizePath(string* path, string* err);
+
+bool CanonicalizePath(char* path, size_t* len, string* err);
+
+/// Read a file to a string (in text mode: with CRLF conversion
+/// on Windows).
+/// Returns -errno and fills in \a err on error.
+int ReadFile(const string& path, string* contents, string* err);
+
+/// Mark a file descriptor to not be inherited on exec()s.
+void SetCloseOnExec(int fd);
+
+/// Given a misspelled string and a list of correct spellings, returns
+/// the closest match or NULL if there is no close enough match.
+const char* SpellcheckStringV(const string& text,
+ const vector<const char*>& words);
+
+/// Like SpellcheckStringV, but takes a NULL-terminated list.
+const char* SpellcheckString(const char* text, ...);
+
+/// Removes all ANSI escape codes (http://www.termsys.demon.co.uk/vtansi.htm).
+string StripAnsiEscapeCodes(const string& in);
+
+/// @return the number of processors on the machine. Useful for an initial
+/// guess for how many jobs to run in parallel. @return 0 on error.
+int GetProcessorCount();
+
+/// @return the load average of the machine. A negative value is returned
+/// on error.
+double GetLoadAverage();
+
+/// Elide the given string @a str with '...' in the middle if the length
+/// exceeds @a width.
+string ElideMiddle(const string& str, size_t width);
+
+/// Truncates a file to the given size.
+bool Truncate(const string& path, size_t size, string* err);
+
+#ifdef _MSC_VER
+#define snprintf _snprintf
+#define fileno _fileno
+#define unlink _unlink
+#define chdir _chdir
+#define strtoull _strtoui64
+#define getcwd _getcwd
+#define PATH_MAX _MAX_PATH
+#endif
+
+#ifdef _WIN32
+/// Convert the value returned by GetLastError() into a string.
+string GetLastErrorString();
+
+/// Calls Fatal() with a function name and GetLastErrorString.
+NORETURN void Win32Fatal(const char* function);
+#endif
+
+#endif // NINJA_UTIL_H_
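
As a short, hedged illustration of the spell-checking and eliding helpers above; the word list and the SuggestTool/PrintStatus names are made up for the example:

#include "util.h"

#include <stdio.h>

// Sketch: suggest a close match for a misspelled name, and keep long
// command lines readable on a single status line.
void SuggestTool(const char* name) {
  const char* suggestion =
      SpellcheckString(name, "clean", "graph", "targets", "rules", NULL);
  if (suggestion)
    Error("unknown tool '%s'; did you mean '%s'?", name, suggestion);
  else
    Error("unknown tool '%s'", name);
}

void PrintStatus(const string& command) {
  printf("%s\n", ElideMiddle(command, 80).c_str());
}
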
diff --git a/ninja/src/util_test.cc b/ninja/src/util_test.cc
new file mode 100644
index 00000000000..1e290537c29
--- /dev/null
+++ b/ninja/src/util_test.cc
@@ -0,0 +1,165 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "util.h"
+
+#include "test.h"
+
+TEST(CanonicalizePath, PathSamples) {
+ string path;
+ string err;
+
+ EXPECT_FALSE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("empty path", err);
+
+ path = "foo.h"; err = "";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo.h", path);
+
+ path = "./foo.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo.h", path);
+
+ path = "./foo/./bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/bar.h", path);
+
+ path = "./x/foo/../bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("x/bar.h", path);
+
+ path = "./x/foo/../../bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("bar.h", path);
+
+ path = "foo//bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/bar", path);
+
+ path = "foo//.//..///bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("bar", path);
+
+ path = "./x/../foo/../../bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../bar.h", path);
+
+ path = "foo/./.";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo", path);
+
+ path = "foo/bar/..";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo", path);
+
+ path = "foo/.hidden_bar";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("foo/.hidden_bar", path);
+
+ path = "/foo";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("/foo", path);
+
+ path = "//foo";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+#ifdef _WIN32
+ EXPECT_EQ("//foo", path);
+#else
+ EXPECT_EQ("/foo", path);
+#endif
+
+ path = "/";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("", path);
+}
+
+TEST(CanonicalizePath, EmptyResult) {
+ string path;
+ string err;
+
+ EXPECT_FALSE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("empty path", err);
+
+ path = ".";
+ EXPECT_FALSE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("path canonicalizes to the empty path", err);
+
+ path = "./.";
+ EXPECT_FALSE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("path canonicalizes to the empty path", err);
+}
+
+TEST(CanonicalizePath, UpDir) {
+ string path, err;
+ path = "../../foo/bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../../foo/bar.h", path);
+
+ path = "test/../../foo/bar.h";
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("../foo/bar.h", path);
+}
+
+TEST(CanonicalizePath, AbsolutePath) {
+ string path = "/usr/include/stdio.h";
+ string err;
+ EXPECT_TRUE(CanonicalizePath(&path, &err));
+ EXPECT_EQ("/usr/include/stdio.h", path);
+}
+
+TEST(CanonicalizePath, NotNullTerminated) {
+ string path;
+ string err;
+ size_t len;
+
+ path = "foo/. bar/.";
+ len = strlen("foo/."); // Canonicalize only the part before the space.
+ EXPECT_TRUE(CanonicalizePath(&path[0], &len, &err));
+ EXPECT_EQ(strlen("foo"), len);
+ EXPECT_EQ("foo/. bar/.", string(path));
+
+ path = "foo/../file bar/.";
+ len = strlen("foo/../file");
+ EXPECT_TRUE(CanonicalizePath(&path[0], &len, &err));
+ EXPECT_EQ(strlen("file"), len);
+ EXPECT_EQ("file ./file bar/.", string(path));
+}
+
+TEST(StripAnsiEscapeCodes, EscapeAtEnd) {
+ string stripped = StripAnsiEscapeCodes("foo\33");
+ EXPECT_EQ("foo", stripped);
+
+ stripped = StripAnsiEscapeCodes("foo\33[");
+ EXPECT_EQ("foo", stripped);
+}
+
+TEST(StripAnsiEscapeCodes, StripColors) {
+ // An actual clang warning.
+ string input = "\33[1maffixmgr.cxx:286:15: \33[0m\33[0;1;35mwarning: "
+ "\33[0m\33[1musing the result... [-Wparentheses]\33[0m";
+ string stripped = StripAnsiEscapeCodes(input);
+ EXPECT_EQ("affixmgr.cxx:286:15: warning: using the result... [-Wparentheses]",
+ stripped);
+}
+
+TEST(ElideMiddle, NothingToElide) {
+ string input = "Nothing to elide in this short string.";
+ EXPECT_EQ(input, ElideMiddle(input, 80));
+}
+
+TEST(ElideMiddle, ElideInTheMiddle) {
+ string input = "01234567890123456789";
+ string elided = ElideMiddle(input, 10);
+ EXPECT_EQ("012...789", elided);
+}
diff --git a/ninja/src/version.cc b/ninja/src/version.cc
new file mode 100644
index 00000000000..18fa96a07eb
--- /dev/null
+++ b/ninja/src/version.cc
@@ -0,0 +1,53 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "version.h"
+
+#include <stdlib.h>
+
+#include "util.h"
+
+const char* kNinjaVersion = "1.3.0.git";
+
+void ParseVersion(const string& version, int* major, int* minor) {
+ size_t end = version.find('.');
+ *major = atoi(version.substr(0, end).c_str());
+ *minor = 0;
+ if (end != string::npos) {
+ size_t start = end + 1;
+ end = version.find('.', start);
+ *minor = atoi(version.substr(start, end).c_str());
+ }
+}
+
+void CheckNinjaVersion(const string& version) {
+ int bin_major, bin_minor;
+ ParseVersion(kNinjaVersion, &bin_major, &bin_minor);
+ int file_major, file_minor;
+ ParseVersion(version, &file_major, &file_minor);
+
+ if (bin_major > file_major) {
+ Warning("ninja executable version (%s) greater than build file "
+ "ninja_required_version (%s); versions may be incompatible.",
+ kNinjaVersion, version.c_str());
+ return;
+ }
+
+ if ((bin_major == file_major && bin_minor < file_minor) ||
+ bin_major < file_major) {
+    Fatal("ninja version (%s) incompatible with build file "
+          "ninja_required_version (%s).",
+          kNinjaVersion, version.c_str());
+ }
+}
diff --git a/ninja/src/version.h b/ninja/src/version.h
new file mode 100644
index 00000000000..bd6b9ffe21e
--- /dev/null
+++ b/ninja/src/version.h
@@ -0,0 +1,32 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_VERSION_H_
+#define NINJA_VERSION_H_
+
+#include <string>
+using namespace std;
+
+/// The version number of the current Ninja release.  This will always
+/// carry a ".git" suffix on trunk.
+extern const char* kNinjaVersion;
+
+/// Parse the major/minor components of a version string.
+void ParseVersion(const string& version, int* major, int* minor);
+
+/// Check whether \a required_version is compatible with the current Ninja
+/// version, aborting if not.
+void CheckNinjaVersion(const string& required_version);
+
+#endif // NINJA_VERSION_H_
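
A small sketch of the parsing contract above, using illustrative version strings:

#include "version.h"

#include <assert.h>

void VersionParsingExample() {
  int major = 0, minor = 0;

  ParseVersion("1.3.0.git", &major, &minor);
  assert(major == 1 && minor == 3);  // Anything past major.minor is ignored.

  ParseVersion("2", &major, &minor);
  assert(major == 2 && minor == 0);  // No '.': minor defaults to 0.

  // Warns if the running binary is newer than the manifest's
  // ninja_required_version; calls Fatal() if it is older.
  CheckNinjaVersion("1.0");
}
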
diff --git a/ninja/src/win32port.h b/ninja/src/win32port.h
new file mode 100644
index 00000000000..ce3c9498e5d
--- /dev/null
+++ b/ninja/src/win32port.h
@@ -0,0 +1,31 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef NINJA_WIN32PORT_H_
+#define NINJA_WIN32PORT_H_
+
+typedef signed short int16_t;
+typedef unsigned short uint16_t;
+/// A 64-bit integer type.
+typedef signed long long int64_t;
+typedef unsigned long long uint64_t;
+
+// printf format specifier for uint64_t, from C99.
+#ifndef PRIu64
+#define PRIu64 "I64u"
+#define PRIx64 "I64x"
+#endif
+
+#endif // NINJA_WIN32PORT_H_
+