Diffstat (limited to 'ninja')
-rw-r--r--  ninja/HACKING.md | 46
-rw-r--r--  ninja/RELEASING | 9
-rwxr-xr-x  ninja/configure.py | 32
-rw-r--r--  ninja/doc/manual.asciidoc | 13
-rw-r--r--  ninja/misc/afl-fuzz-tokens/kw_build | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/kw_default | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/kw_include | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/kw_pool | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/kw_rule | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/kw_subninja | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_a | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_b | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_colon | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_cont | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_dollar | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_eq | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_indent | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_pipe | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_pipepipe | 1
-rw-r--r--  ninja/misc/afl-fuzz-tokens/misc_space | 1
-rw-r--r--  ninja/misc/afl-fuzz/build.ninja | 5
-rw-r--r--  ninja/misc/ninja-mode.el | 43
-rw-r--r--  ninja/misc/zsh-completion | 2
-rw-r--r--  ninja/src/build.cc | 193
-rw-r--r--  ninja/src/build.h | 11
-rw-r--r--  ninja/src/build_log.cc | 4
-rw-r--r--  ninja/src/build_log.h | 2
-rw-r--r--  ninja/src/build_log_test.cc | 1
-rw-r--r--  ninja/src/build_test.cc | 146
-rw-r--r--  ninja/src/clean.cc | 10
-rw-r--r--  ninja/src/clean_test.cc | 85
-rw-r--r--  ninja/src/depfile_parser.cc | 4
-rw-r--r--  ninja/src/depfile_parser.in.cc | 4
-rw-r--r--  ninja/src/depfile_parser_test.cc | 2
-rw-r--r--  ninja/src/deps_log.cc | 8
-rw-r--r--  ninja/src/deps_log.h | 4
-rw-r--r--  ninja/src/deps_log_test.cc | 2
-rw-r--r--  ninja/src/disk_interface.cc | 50
-rw-r--r--  ninja/src/disk_interface.h | 11
-rw-r--r--  ninja/src/disk_interface_test.cc | 109
-rw-r--r--  ninja/src/edit_distance.cc | 2
-rw-r--r--  ninja/src/eval_env.cc | 51
-rw-r--r--  ninja/src/eval_env.h | 75
-rw-r--r--  ninja/src/graph.cc | 120
-rw-r--r--  ninja/src/graph.h | 44
-rw-r--r--  ninja/src/graph_test.cc | 75
-rw-r--r--  ninja/src/graphviz.cc | 5
-rw-r--r--  ninja/src/hash_collision_bench.cc | 6
-rw-r--r--  ninja/src/hash_map.h | 30
-rw-r--r--  ninja/src/line_printer.cc | 36
-rw-r--r--  ninja/src/manifest_parser.cc | 49
-rw-r--r--  ninja/src/manifest_parser.h | 8
-rw-r--r--  ninja/src/manifest_parser_perftest.cc | 19
-rw-r--r--  ninja/src/manifest_parser_test.cc | 88
-rw-r--r--  ninja/src/ninja.cc | 92
-rw-r--r--  ninja/src/state.cc | 27
-rw-r--r--  ninja/src/state.h | 15
-rw-r--r--  ninja/src/state_test.cc | 2
-rw-r--r--  ninja/src/subprocess-posix.cc | 67
-rw-r--r--  ninja/src/subprocess.h | 10
-rw-r--r--  ninja/src/subprocess_test.cc | 61
-rw-r--r--  ninja/src/test.cc | 40
-rw-r--r--  ninja/src/test.h | 4
-rw-r--r--  ninja/src/util.cc | 7
-rw-r--r--  ninja/src/version.cc | 2
65 files changed, 1196 insertions, 551 deletions
diff --git a/ninja/HACKING.md b/ninja/HACKING.md
index 059a4244a67..9c6830f5d4d 100644
--- a/ninja/HACKING.md
+++ b/ninja/HACKING.md
@@ -161,3 +161,49 @@ The trick is to install just the compilers, and not all of Visual Studio,
by following [these instructions][win7sdk].
[win7sdk]: http://www.kegel.com/wine/cl-howto-win7sdk.html
+
+### Using gcov
+
+Do a clean debug build with the right flags:
+
+ CFLAGS=-coverage LDFLAGS=-coverage ./configure.py --debug
+ ninja -t clean ninja_test && ninja ninja_test
+
+Run the test binary to generate `.gcda` and `.gcno` files in the build
+directory, then run gcov on the .o files to generate `.gcov` files in the
+root directory:
+
+ ./ninja_test
+ gcov build/*.o
+
+Look at the generated `.gcov` files directly, or use your favorite gcov viewer.
+
+### Using afl-fuzz
+
+Build with afl-clang++:
+
+ CXX=path/to/afl-1.20b/afl-clang++ ./configure.py
+ ninja
+
+Then run afl-fuzz like so:
+
+ afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out ./ninja -n -f @@
+
+You can pass `-x misc/afl-fuzz-tokens` to use the token dictionary. In my
+testing, that did not seem more effective though.
+
+#### Using afl-fuzz with asan
+
+If you want to use asan (the `isysroot` bit is only needed on OS X; if clang
+can't find C++ standard headers make sure your LLVM checkout includes a libc++
+checkout and has libc++ installed in the build directory):
+
+ CFLAGS="-fsanitize=address -isysroot $(xcrun -show-sdk-path)" \
+ LDFLAGS=-fsanitize=address CXX=path/to/afl-1.20b/afl-clang++ \
+ ./configure.py
+ AFL_CXX=path/to/clang++ ninja
+
+Make sure ninja can find the asan runtime:
+
+ DYLD_LIBRARY_PATH=path/to//lib/clang/3.7.0/lib/darwin/ \
+ afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out ./ninja -n -f @@
diff --git a/ninja/RELEASING b/ninja/RELEASING
index fd4affb0785..4d08253ff59 100644
--- a/ninja/RELEASING
+++ b/ninja/RELEASING
@@ -23,6 +23,9 @@ Make announcement on mailing list:
1. copy old mail
Update website:
-(note to self: website is now in github.com/martine/martine.github.io)
-1. rebuild manual, put in place on website
-2. update home page mention of latest version.
+1. Make sure your ninja checkout is on the v1.5.0 tag
+2. Clone https://github.com/martine/martine.github.io
+3. In that repo, `cd ninja && ./update-docs.sh`
+4. Update index.html with newest version and link to release notes
+5. git commit -m 'run update-docs.sh, 1.5.0 release'
+6. git push origin master
diff --git a/ninja/configure.py b/ninja/configure.py
index aac4ad4f0af..2eacbfeb928 100755
--- a/ninja/configure.py
+++ b/ninja/configure.py
@@ -106,8 +106,9 @@ class Bootstrap:
It also proxies all calls to an underlying ninja_syntax.Writer, to
behave like non-bootstrap mode.
"""
- def __init__(self, writer):
+ def __init__(self, writer, verbose=False):
self.writer = writer
+ self.verbose = verbose
# Map of variable name => expanded variable value.
self.vars = {}
# Map of rule name => dict of rule attributes.
@@ -163,8 +164,10 @@ class Bootstrap:
def _run_command(self, cmdline):
"""Run a subcommand, quietly. Prints the full command on error."""
try:
+ if self.verbose:
+ print(cmdline)
subprocess.check_call(cmdline, shell=True)
- except subprocess.CalledProcessError, e:
+ except subprocess.CalledProcessError:
print('when running: ', cmdline)
raise
@@ -173,6 +176,8 @@ parser = OptionParser()
profilers = ['gmon', 'pprof']
parser.add_option('--bootstrap', action='store_true',
help='bootstrap a ninja binary from nothing')
+parser.add_option('--verbose', action='store_true',
+ help='enable verbose build')
parser.add_option('--platform',
help='target platform (' +
'/'.join(Platform.known_platforms()) + ')',
@@ -217,7 +222,7 @@ if options.bootstrap:
# Wrap ninja_writer with the Bootstrapper, which also executes the
# commands.
print('bootstrapping ninja...')
- n = Bootstrap(n)
+ n = Bootstrap(n, verbose=options.verbose)
n.comment('This file is used to build ninja itself.')
n.comment('It is generated by ' + os.path.basename(__file__) + '.')
@@ -279,12 +284,13 @@ if platform.is_msvc():
'/wd4512', '/wd4800', '/wd4702', '/wd4819',
# Disable warnings about passing "this" during initialization.
'/wd4355',
+ # Disable warnings about ignored typedef in DbgHelp.h
+ '/wd4091',
'/GR-', # Disable RTTI.
# Disable size_t -> int truncation warning.
# We never have strings or arrays larger than 2**31.
'/wd4267',
'/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
- '/D_VARIADIC_MAX=10',
'/DNINJA_PYTHON="%s"' % options.with_python]
if options.bootstrap:
# In bootstrap mode, we have no ninja process to catch /showIncludes
@@ -310,8 +316,14 @@ else:
cflags.remove('-fno-rtti') # Needed for above pedanticness.
else:
cflags += ['-O2', '-DNDEBUG']
- if 'clang' in os.path.basename(CXX):
- cflags += ['-fcolor-diagnostics']
+ try:
+ proc = subprocess.Popen(
+ [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null'],
+ stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
+ if proc.wait() == 0:
+ cflags += ['-fdiagnostics-color']
+ except:
+ pass
if platform.is_mingw():
cflags += ['-D_WIN32_WINNT=0x0501']
ldflags = ['-L$builddir']
@@ -602,6 +614,10 @@ n.build('all', 'phony', all_targets)
n.close()
print('wrote %s.' % BUILD_FILENAME)
+verbose = ''
+if options.verbose:
+ verbose = ' -v'
+
if options.bootstrap:
print('bootstrap complete. rebuilding...')
if platform.is_windows():
@@ -609,6 +625,6 @@ if options.bootstrap:
if os.path.exists(bootstrap_exe):
os.unlink(bootstrap_exe)
os.rename('ninja.exe', bootstrap_exe)
- subprocess.check_call('ninja.bootstrap.exe', shell=True)
+ subprocess.check_call('ninja.bootstrap.exe%s' % verbose, shell=True)
else:
- subprocess.check_call('./ninja', shell=True)
+ subprocess.check_call('./ninja%s' % verbose, shell=True)
diff --git a/ninja/doc/manual.asciidoc b/ninja/doc/manual.asciidoc
index 21f4e42908c..dc4ffad72fc 100644
--- a/ninja/doc/manual.asciidoc
+++ b/ninja/doc/manual.asciidoc
@@ -1,7 +1,7 @@
Ninja
=====
Evan Martin <martine@danga.com>
-v1.5.3, Nov 2014
+v1.6.0, Jun 2015
Introduction
@@ -181,6 +181,12 @@ Run `ninja`. By default, it looks for a file named `build.ninja` in
the current directory and builds all out-of-date targets. You can
specify which targets (files) to build as command line arguments.
+There is also a special syntax `target^` for specifying a target
+as the first output of some rule containing the source you put in
+the command line, if one exists. For example, if you specify target as
+`foo.c^` then `foo.o` will get built (assuming you have those targets
+in your build files).
+
`ninja -h` prints help output. Many of Ninja's flags intentionally
match those of Make; e.g `ninja -C build -j 20` changes into the
`build` directory and runs 20 build commands in parallel. (Note that
@@ -915,9 +921,12 @@ Evaluation and scoping
Top-level variable declarations are scoped to the file they occur in.
+Rule declarations are also scoped to the file they occur in.
+_(Available since Ninja 1.6)_
+
The `subninja` keyword, used to include another `.ninja` file,
introduces a new scope. The included `subninja` file may use the
-variables from the parent file, and shadow their values for the file's
+variables and rules from the parent file, and shadow their values for the file's
scope, but it won't affect values of the variables in the parent.
To include another `.ninja` file in the current scope, much like a C
diff --git a/ninja/misc/afl-fuzz-tokens/kw_build b/ninja/misc/afl-fuzz-tokens/kw_build
new file mode 100644
index 00000000000..c795b054e5a
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/kw_build
@@ -0,0 +1 @@
+build
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/kw_default b/ninja/misc/afl-fuzz-tokens/kw_default
new file mode 100644
index 00000000000..331d858ce9b
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/kw_default
@@ -0,0 +1 @@
+default
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/kw_include b/ninja/misc/afl-fuzz-tokens/kw_include
new file mode 100644
index 00000000000..2996fba3563
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/kw_include
@@ -0,0 +1 @@
+include
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/kw_pool b/ninja/misc/afl-fuzz-tokens/kw_pool
new file mode 100644
index 00000000000..e783591ae77
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/kw_pool
@@ -0,0 +1 @@
+pool
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/kw_rule b/ninja/misc/afl-fuzz-tokens/kw_rule
new file mode 100644
index 00000000000..841e840f873
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/kw_rule
@@ -0,0 +1 @@
+rule
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/kw_subninja b/ninja/misc/afl-fuzz-tokens/kw_subninja
new file mode 100644
index 00000000000..c4fe0c78f16
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/kw_subninja
@@ -0,0 +1 @@
+subninja
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_a b/ninja/misc/afl-fuzz-tokens/misc_a
new file mode 100644
index 00000000000..2e65efe2a14
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_a
@@ -0,0 +1 @@
+a
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_b b/ninja/misc/afl-fuzz-tokens/misc_b
new file mode 100644
index 00000000000..63d8dbd40c2
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_b
@@ -0,0 +1 @@
+b
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_colon b/ninja/misc/afl-fuzz-tokens/misc_colon
new file mode 100644
index 00000000000..22ded55aa2c
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_colon
@@ -0,0 +1 @@
+:
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_cont b/ninja/misc/afl-fuzz-tokens/misc_cont
new file mode 100644
index 00000000000..857f13ad1bc
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_cont
@@ -0,0 +1 @@
+$
diff --git a/ninja/misc/afl-fuzz-tokens/misc_dollar b/ninja/misc/afl-fuzz-tokens/misc_dollar
new file mode 100644
index 00000000000..6f4f765ed69
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_dollar
@@ -0,0 +1 @@
+$
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_eq b/ninja/misc/afl-fuzz-tokens/misc_eq
new file mode 100644
index 00000000000..851c75cc5e7
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_eq
@@ -0,0 +1 @@
+=
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_indent b/ninja/misc/afl-fuzz-tokens/misc_indent
new file mode 100644
index 00000000000..136d06384a4
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_indent
@@ -0,0 +1 @@
+ 
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_pipe b/ninja/misc/afl-fuzz-tokens/misc_pipe
new file mode 100644
index 00000000000..a3871d45082
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_pipe
@@ -0,0 +1 @@
+|
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_pipepipe b/ninja/misc/afl-fuzz-tokens/misc_pipepipe
new file mode 100644
index 00000000000..27cc728d691
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_pipepipe
@@ -0,0 +1 @@
+||
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz-tokens/misc_space b/ninja/misc/afl-fuzz-tokens/misc_space
new file mode 100644
index 00000000000..0519ecba6ea
--- /dev/null
+++ b/ninja/misc/afl-fuzz-tokens/misc_space
@@ -0,0 +1 @@
+ 
\ No newline at end of file
diff --git a/ninja/misc/afl-fuzz/build.ninja b/ninja/misc/afl-fuzz/build.ninja
new file mode 100644
index 00000000000..52cd2f151bb
--- /dev/null
+++ b/ninja/misc/afl-fuzz/build.ninja
@@ -0,0 +1,5 @@
+rule b
+ command = clang -MMD -MF $out.d -o $out -c $in
+ description = building $out
+
+build a.o: b a.c
diff --git a/ninja/misc/ninja-mode.el b/ninja/misc/ninja-mode.el
index 71825d547bd..639e5375bb0 100644
--- a/ninja/misc/ninja-mode.el
+++ b/ninja/misc/ninja-mode.el
@@ -47,28 +47,27 @@
(defun ninja-syntax-propertize (start end)
(save-match-data
- (save-excursion
- (goto-char start)
- (while (search-forward "#" end t)
- (let ((match-pos (match-beginning 0)))
- (when (and
- ;; Is it the first non-white character on the line?
- (eq match-pos (save-excursion (back-to-indentation) (point)))
- (save-excursion
- (goto-char (line-end-position 0))
- (or
- ;; If we're continuting the previous line, it's not a
- ;; comment.
- (not (eq ?$ (char-before)))
- ;; Except if the previous line is a comment as well, as the
- ;; continuation dollar is ignored then.
- (nth 4 (syntax-ppss)))))
- (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
- (let ((line-end (line-end-position)))
- ;; Avoid putting properties past the end of the buffer.
- ;; Otherwise we get an `args-out-of-range' error.
- (unless (= line-end (1+ (buffer-size)))
- (put-text-property line-end (1+ line-end) 'syntax-table '(12))))))))))
+ (goto-char start)
+ (while (search-forward "#" end t)
+ (let ((match-pos (match-beginning 0)))
+ (when (and
+ ;; Is it the first non-white character on the line?
+ (eq match-pos (save-excursion (back-to-indentation) (point)))
+ (save-excursion
+ (goto-char (line-end-position 0))
+ (or
+ ;; If we're continuting the previous line, it's not a
+ ;; comment.
+ (not (eq ?$ (char-before)))
+ ;; Except if the previous line is a comment as well, as the
+ ;; continuation dollar is ignored then.
+ (nth 4 (syntax-ppss)))))
+ (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
+ (let ((line-end (line-end-position)))
+ ;; Avoid putting properties past the end of the buffer.
+ ;; Otherwise we get an `args-out-of-range' error.
+ (unless (= line-end (1+ (buffer-size)))
+ (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))
;;;###autoload
(define-derived-mode ninja-mode prog-mode "ninja"
diff --git a/ninja/misc/zsh-completion b/ninja/misc/zsh-completion
index af24f89b265..fd9b3a7a7e9 100644
--- a/ninja/misc/zsh-completion
+++ b/ninja/misc/zsh-completion
@@ -32,7 +32,7 @@ __get_tools() {
}
__get_modes() {
- ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | head -n -1
+ ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d'
}
__modes() {
diff --git a/ninja/src/build.cc b/ninja/src/build.cc
index 3e74131a72e..cdb8e0a7b7c 100644
--- a/ninja/src/build.cc
+++ b/ninja/src/build.cc
@@ -278,7 +278,7 @@ bool Plan::AddSubTarget(Node* node, vector<Node*>* stack, string* err) {
return false;
}
- if (CheckDependencyCycle(node, stack, err))
+ if (CheckDependencyCycle(node, *stack, err))
return false;
if (edge->outputs_ready())
@@ -316,20 +316,32 @@ bool Plan::AddSubTarget(Node* node, vector<Node*>* stack, string* err) {
return true;
}
-bool Plan::CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err) {
- vector<Node*>::reverse_iterator ri =
- find(stack->rbegin(), stack->rend(), node);
- if (ri == stack->rend())
+bool Plan::CheckDependencyCycle(Node* node, const vector<Node*>& stack,
+ string* err) {
+ vector<Node*>::const_iterator start = stack.begin();
+ while (start != stack.end() && (*start)->in_edge() != node->in_edge())
+ ++start;
+ if (start == stack.end())
return false;
- // Add this node onto the stack to make it clearer where the loop
- // is.
- stack->push_back(node);
+ // Build error string for the cycle.
+ vector<Node*> cycle(start, stack.end());
+ cycle.push_back(node);
+
+ if (cycle.front() != cycle.back()) {
+ // Consider
+ // build a b: cat c
+ // build c: cat a
+ // stack will contain [b, c], node will be a. To not print b -> c -> a,
+ // shift by one to get c -> a -> c which makes the cycle clear.
+ cycle.erase(cycle.begin());
+ cycle.push_back(cycle.front());
+ assert(cycle.front() == cycle.back());
+ }
- vector<Node*>::iterator start = find(stack->begin(), stack->end(), node);
*err = "dependency cycle: ";
- for (vector<Node*>::iterator i = start; i != stack->end(); ++i) {
- if (i != start)
+ for (vector<Node*>::const_iterator i = cycle.begin(); i != cycle.end(); ++i) {
+ if (i != cycle.begin())
err->append(" -> ");
err->append((*i)->path());
}
@@ -339,9 +351,9 @@ bool Plan::CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err) {
Edge* Plan::FindWork() {
if (ready_.empty())
return NULL;
- set<Edge*>::iterator i = ready_.begin();
- Edge* edge = *i;
- ready_.erase(i);
+ set<Edge*>::iterator e = ready_.begin();
+ Edge* edge = *e;
+ ready_.erase(e);
return edge;
}
@@ -362,101 +374,106 @@ void Plan::ScheduleWork(Edge* edge) {
}
}
-void Plan::ResumeDelayedJobs(Edge* edge) {
- edge->pool()->EdgeFinished(*edge);
- edge->pool()->RetrieveReadyEdges(&ready_);
-}
-
void Plan::EdgeFinished(Edge* edge) {
- map<Edge*, bool>::iterator i = want_.find(edge);
- assert(i != want_.end());
- if (i->second)
+ map<Edge*, bool>::iterator e = want_.find(edge);
+ assert(e != want_.end());
+ bool directly_wanted = e->second;
+ if (directly_wanted)
--wanted_edges_;
- want_.erase(i);
+ want_.erase(e);
edge->outputs_ready_ = true;
- // See if this job frees up any delayed jobs
- ResumeDelayedJobs(edge);
+ // See if this job frees up any delayed jobs.
+ if (directly_wanted)
+ edge->pool()->EdgeFinished(*edge);
+ edge->pool()->RetrieveReadyEdges(&ready_);
// Check off any nodes we were waiting for with this edge.
- for (vector<Node*>::iterator i = edge->outputs_.begin();
- i != edge->outputs_.end(); ++i) {
- NodeFinished(*i);
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ NodeFinished(*o);
}
}
void Plan::NodeFinished(Node* node) {
// See if we we want any edges from this node.
- for (vector<Edge*>::const_iterator i = node->out_edges().begin();
- i != node->out_edges().end(); ++i) {
- map<Edge*, bool>::iterator want_i = want_.find(*i);
- if (want_i == want_.end())
+ for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+ oe != node->out_edges().end(); ++oe) {
+ map<Edge*, bool>::iterator want_e = want_.find(*oe);
+ if (want_e == want_.end())
continue;
// See if the edge is now ready.
- if ((*i)->AllInputsReady()) {
- if (want_i->second) {
- ScheduleWork(*i);
+ if ((*oe)->AllInputsReady()) {
+ if (want_e->second) {
+ ScheduleWork(*oe);
} else {
// We do not need to build this edge, but we might need to build one of
// its dependents.
- EdgeFinished(*i);
+ EdgeFinished(*oe);
}
}
}
}
-void Plan::CleanNode(DependencyScan* scan, Node* node) {
+bool Plan::CleanNode(DependencyScan* scan, Node* node, string* err) {
node->set_dirty(false);
- for (vector<Edge*>::const_iterator ei = node->out_edges().begin();
- ei != node->out_edges().end(); ++ei) {
+ for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+ oe != node->out_edges().end(); ++oe) {
// Don't process edges that we don't actually want.
- map<Edge*, bool>::iterator want_i = want_.find(*ei);
- if (want_i == want_.end() || !want_i->second)
+ map<Edge*, bool>::iterator want_e = want_.find(*oe);
+ if (want_e == want_.end() || !want_e->second)
continue;
// Don't attempt to clean an edge if it failed to load deps.
- if ((*ei)->deps_missing_)
+ if ((*oe)->deps_missing_)
continue;
// If all non-order-only inputs for this edge are now clean,
// we might have changed the dirty state of the outputs.
vector<Node*>::iterator
- begin = (*ei)->inputs_.begin(),
- end = (*ei)->inputs_.end() - (*ei)->order_only_deps_;
+ begin = (*oe)->inputs_.begin(),
+ end = (*oe)->inputs_.end() - (*oe)->order_only_deps_;
if (find_if(begin, end, mem_fun(&Node::dirty)) == end) {
// Recompute most_recent_input.
Node* most_recent_input = NULL;
- for (vector<Node*>::iterator ni = begin; ni != end; ++ni) {
- if (!most_recent_input || (*ni)->mtime() > most_recent_input->mtime())
- most_recent_input = *ni;
+ for (vector<Node*>::iterator i = begin; i != end; ++i) {
+ if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime())
+ most_recent_input = *i;
}
// Now, this edge is dirty if any of the outputs are dirty.
// If the edge isn't dirty, clean the outputs and mark the edge as not
// wanted.
- if (!scan->RecomputeOutputsDirty(*ei, most_recent_input)) {
- for (vector<Node*>::iterator ni = (*ei)->outputs_.begin();
- ni != (*ei)->outputs_.end(); ++ni) {
- CleanNode(scan, *ni);
+ bool outputs_dirty = false;
+ if (!scan->RecomputeOutputsDirty(*oe, most_recent_input,
+ &outputs_dirty, err)) {
+ return false;
+ }
+ if (!outputs_dirty) {
+ for (vector<Node*>::iterator o = (*oe)->outputs_.begin();
+ o != (*oe)->outputs_.end(); ++o) {
+ if (!CleanNode(scan, *o, err))
+ return false;
}
- want_i->second = false;
+ want_e->second = false;
--wanted_edges_;
- if (!(*ei)->is_phony())
+ if (!(*oe)->is_phony())
--command_edges_;
}
}
}
+ return true;
}
void Plan::Dump() {
printf("pending: %d\n", (int)want_.size());
- for (map<Edge*, bool>::iterator i = want_.begin(); i != want_.end(); ++i) {
- if (i->second)
+ for (map<Edge*, bool>::iterator e = want_.begin(); e != want_.end(); ++e) {
+ if (e->second)
printf("want ");
- i->first->Dump();
+ e->first->Dump();
}
printf("ready: %d\n", (int)ready_.size());
}
@@ -477,9 +494,9 @@ struct RealCommandRunner : public CommandRunner {
vector<Edge*> RealCommandRunner::GetActiveEdges() {
vector<Edge*> edges;
- for (map<Subprocess*, Edge*>::iterator i = subproc_to_edge_.begin();
- i != subproc_to_edge_.end(); ++i)
- edges.push_back(i->second);
+ for (map<Subprocess*, Edge*>::iterator e = subproc_to_edge_.begin();
+ e != subproc_to_edge_.end(); ++e)
+ edges.push_back(e->second);
return edges;
}
@@ -516,9 +533,9 @@ bool RealCommandRunner::WaitForCommand(Result* result) {
result->status = subproc->Finish();
result->output = subproc->GetOutput();
- map<Subprocess*, Edge*>::iterator i = subproc_to_edge_.find(subproc);
- result->edge = i->second;
- subproc_to_edge_.erase(i);
+ map<Subprocess*, Edge*>::iterator e = subproc_to_edge_.find(subproc);
+ result->edge = e->second;
+ subproc_to_edge_.erase(e);
delete subproc;
return true;
@@ -541,11 +558,11 @@ void Builder::Cleanup() {
vector<Edge*> active_edges = command_runner_->GetActiveEdges();
command_runner_->Abort();
- for (vector<Edge*>::iterator i = active_edges.begin();
- i != active_edges.end(); ++i) {
- string depfile = (*i)->GetUnescapedDepfile();
- for (vector<Node*>::iterator ni = (*i)->outputs_.begin();
- ni != (*i)->outputs_.end(); ++ni) {
+ for (vector<Edge*>::iterator e = active_edges.begin();
+ e != active_edges.end(); ++e) {
+ string depfile = (*e)->GetUnescapedDepfile();
+ for (vector<Node*>::iterator o = (*e)->outputs_.begin();
+ o != (*e)->outputs_.end(); ++o) {
// Only delete this output if it was actually modified. This is
// important for things like the generator where we don't want to
// delete the manifest file if we can avoid it. But if the rule
@@ -553,10 +570,12 @@ void Builder::Cleanup() {
// need to rebuild an output because of a modified header file
// mentioned in a depfile, and the command touches its depfile
// but is interrupted before it touches its output file.)
- if (!depfile.empty() ||
- (*ni)->mtime() != disk_interface_->Stat((*ni)->path())) {
- disk_interface_->RemoveFile((*ni)->path());
- }
+ string err;
+ TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), &err);
+ if (new_mtime == -1) // Log and ignore Stat() errors.
+ Error("%s", err.c_str());
+ if (!depfile.empty() || (*o)->mtime() != new_mtime)
+ disk_interface_->RemoveFile((*o)->path());
}
if (!depfile.empty())
disk_interface_->RemoveFile(depfile);
@@ -576,7 +595,6 @@ Node* Builder::AddTarget(const string& name, string* err) {
}
bool Builder::AddTarget(Node* node, string* err) {
- node->StatIfNecessary(disk_interface_);
if (Edge* in_edge = node->in_edge()) {
if (!scan_.RecomputeDirty(in_edge, err))
return false;
@@ -690,9 +708,9 @@ bool Builder::StartEdge(Edge* edge, string* err) {
// Create directories necessary for outputs.
// XXX: this will block; do we care?
- for (vector<Node*>::iterator i = edge->outputs_.begin();
- i != edge->outputs_.end(); ++i) {
- if (!disk_interface_->MakeDirs((*i)->path()))
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ if (!disk_interface_->MakeDirs((*o)->path()))
return false;
}
@@ -752,14 +770,17 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
if (edge->GetBindingBool("restat") && !config_.dry_run) {
bool node_cleaned = false;
- for (vector<Node*>::iterator i = edge->outputs_.begin();
- i != edge->outputs_.end(); ++i) {
- TimeStamp new_mtime = disk_interface_->Stat((*i)->path());
- if ((*i)->mtime() == new_mtime) {
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
+ if (new_mtime == -1)
+ return false;
+ if ((*o)->mtime() == new_mtime) {
// The rule command did not change the output. Propagate the clean
// state through the build graph.
// Note that this also applies to nonexistent outputs (mtime == 0).
- plan_.CleanNode(&scan_, *i);
+ if (!plan_.CleanNode(&scan_, *o, err))
+ return false;
node_cleaned = true;
}
}
@@ -769,14 +790,18 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
// (existing) non-order-only input or the depfile.
for (vector<Node*>::iterator i = edge->inputs_.begin();
i != edge->inputs_.end() - edge->order_only_deps_; ++i) {
- TimeStamp input_mtime = disk_interface_->Stat((*i)->path());
+ TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err);
+ if (input_mtime == -1)
+ return false;
if (input_mtime > restat_mtime)
restat_mtime = input_mtime;
}
string depfile = edge->GetUnescapedDepfile();
if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) {
- TimeStamp depfile_mtime = disk_interface_->Stat(depfile);
+ TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err);
+ if (depfile_mtime == -1)
+ return false;
if (depfile_mtime > restat_mtime)
restat_mtime = depfile_mtime;
}
@@ -805,7 +830,9 @@ bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
if (!deps_type.empty() && !config_.dry_run) {
assert(edge->outputs_.size() == 1 && "should have been rejected by parser");
Node* out = edge->outputs_[0];
- TimeStamp deps_mtime = disk_interface_->Stat(out->path());
+ TimeStamp deps_mtime = disk_interface_->Stat(out->path(), err);
+ if (deps_mtime == -1)
+ return false;
if (!scan_.deps_log()->RecordDeps(out, deps_mtime, deps_nodes)) {
*err = string("Error writing to deps log: ") + strerror(errno);
return false;
diff --git a/ninja/src/build.h b/ninja/src/build.h
index eb3636a9f3f..8106faaa54f 100644
--- a/ninja/src/build.h
+++ b/ninja/src/build.h
@@ -61,14 +61,16 @@ struct Plan {
void EdgeFinished(Edge* edge);
/// Clean the given node during the build.
- void CleanNode(DependencyScan* scan, Node* node);
+ /// Return false on error.
+ bool CleanNode(DependencyScan* scan, Node* node, string* err);
/// Number of edges with commands to run.
int command_edge_count() const { return command_edges_; }
private:
bool AddSubTarget(Node* node, vector<Node*>* stack, string* err);
- bool CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err);
+ bool CheckDependencyCycle(Node* node, const vector<Node*>& stack,
+ string* err);
void NodeFinished(Node* node);
/// Submits a ready edge as a candidate for execution.
@@ -76,11 +78,6 @@ private:
/// currently-full pool.
void ScheduleWork(Edge* edge);
- /// Allows jobs blocking on |edge| to potentially resume.
- /// For example, if |edge| is a member of a pool, calling this may schedule
- /// previously pending jobs in that pool.
- void ResumeDelayedJobs(Edge* edge);
-
/// Keep track of which edges we want to build in this plan. If this map does
/// not contain an entry for an edge, we do not want to build the entry or its
/// dependents. If an entry maps to false, we do not want to build it, but we
diff --git a/ninja/src/build_log.cc b/ninja/src/build_log.cc
index b6f98745788..281b851db6f 100644
--- a/ninja/src/build_log.cc
+++ b/ninja/src/build_log.cc
@@ -102,7 +102,7 @@ BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
{}
BuildLog::BuildLog()
- : log_file_(NULL), needs_recompaction_(false), quiet_(false) {}
+ : log_file_(NULL), needs_recompaction_(false) {}
BuildLog::~BuildLog() {
Close();
@@ -354,8 +354,6 @@ bool BuildLog::WriteEntry(FILE* f, const LogEntry& entry) {
bool BuildLog::Recompact(const string& path, const BuildLogUser& user,
string* err) {
METRIC_RECORD(".ninja_log recompact");
- if (!quiet_)
- printf("Recompacting log...\n");
Close();
string temp_path = path + ".recompact";
diff --git a/ninja/src/build_log.h b/ninja/src/build_log.h
index db0cfe0ebde..fe81a851f41 100644
--- a/ninja/src/build_log.h
+++ b/ninja/src/build_log.h
@@ -80,7 +80,6 @@ struct BuildLog {
/// Rewrite the known log entries, throwing away old data.
bool Recompact(const string& path, const BuildLogUser& user, string* err);
- void set_quiet(bool quiet) { quiet_ = quiet; }
typedef ExternalStringHashMap<LogEntry*>::Type Entries;
const Entries& entries() const { return entries_; }
@@ -89,7 +88,6 @@ struct BuildLog {
Entries entries_;
FILE* log_file_;
bool needs_recompaction_;
- bool quiet_;
};
#endif // NINJA_BUILD_LOG_H_
diff --git a/ninja/src/build_log_test.cc b/ninja/src/build_log_test.cc
index 7ea2117ba09..2c41ba6e8f8 100644
--- a/ninja/src/build_log_test.cc
+++ b/ninja/src/build_log_test.cc
@@ -290,7 +290,6 @@ TEST_F(BuildLogRecompactTest, Recompact) {
ASSERT_TRUE(log2.LookupByOutput("out"));
ASSERT_TRUE(log2.LookupByOutput("out2"));
// ...and force a recompaction.
- log2.set_quiet(true);
EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
log2.Close();
diff --git a/ninja/src/build_test.cc b/ninja/src/build_test.cc
index bd1cd30cff4..52a17c9cfd6 100644
--- a/ninja/src/build_test.cc
+++ b/ninja/src/build_test.cc
@@ -51,9 +51,9 @@ struct PlanTest : public StateTestWithBuiltinRules {
};
TEST_F(PlanTest, Basic) {
- AssertParse(&state_,
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat mid\n"
-"build mid: cat in\n");
+"build mid: cat in\n"));
GetNode("mid")->MarkDirty();
GetNode("out")->MarkDirty();
string err;
@@ -84,9 +84,9 @@ TEST_F(PlanTest, Basic) {
// Test that two outputs from one rule can be handled as inputs to the next.
TEST_F(PlanTest, DoubleOutputDirect) {
- AssertParse(&state_,
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat mid1 mid2\n"
-"build mid1 mid2: cat in\n");
+"build mid1 mid2: cat in\n"));
GetNode("mid1")->MarkDirty();
GetNode("mid2")->MarkDirty();
GetNode("out")->MarkDirty();
@@ -111,11 +111,11 @@ TEST_F(PlanTest, DoubleOutputDirect) {
// Test that two outputs from one rule can eventually be routed to another.
TEST_F(PlanTest, DoubleOutputIndirect) {
- AssertParse(&state_,
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat b1 b2\n"
"build b1: cat a1\n"
"build b2: cat a2\n"
-"build a1 a2: cat in\n");
+"build a1 a2: cat in\n"));
GetNode("a1")->MarkDirty();
GetNode("a2")->MarkDirty();
GetNode("b1")->MarkDirty();
@@ -149,11 +149,11 @@ TEST_F(PlanTest, DoubleOutputIndirect) {
// Test that two edges from one output can both execute.
TEST_F(PlanTest, DoubleDependent) {
- AssertParse(&state_,
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat a1 a2\n"
"build a1: cat mid\n"
"build a2: cat mid\n"
-"build mid: cat in\n");
+"build mid: cat in\n"));
GetNode("mid")->MarkDirty();
GetNode("a1")->MarkDirty();
GetNode("a2")->MarkDirty();
@@ -186,11 +186,11 @@ TEST_F(PlanTest, DoubleDependent) {
}
TEST_F(PlanTest, DependencyCycle) {
- AssertParse(&state_,
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat mid\n"
"build mid: cat in\n"
"build in: cat pre\n"
-"build pre: cat out\n");
+"build pre: cat out\n"));
GetNode("out")->MarkDirty();
GetNode("mid")->MarkDirty();
GetNode("in")->MarkDirty();
@@ -201,6 +201,43 @@ TEST_F(PlanTest, DependencyCycle) {
ASSERT_EQ("dependency cycle: out -> mid -> in -> pre -> out", err);
}
+TEST_F(PlanTest, CycleInEdgesButNotInNodes1) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a b: cat a\n"));
+ EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err));
+ ASSERT_EQ("dependency cycle: a -> a", err);
+}
+
+TEST_F(PlanTest, CycleInEdgesButNotInNodes2) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build b a: cat a\n"));
+ EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err));
+ ASSERT_EQ("dependency cycle: a -> a", err);
+}
+
+TEST_F(PlanTest, CycleInEdgesButNotInNodes3) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a b: cat c\n"
+"build c: cat a\n"));
+ EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err));
+ ASSERT_EQ("dependency cycle: c -> a -> c", err);
+}
+
+TEST_F(PlanTest, CycleInEdgesButNotInNodes4) {
+ string err;
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build d: cat c\n"
+"build c: cat b\n"
+"build b: cat a\n"
+"build a e: cat d\n"
+"build f: cat e\n"));
+ EXPECT_FALSE(plan_.AddTarget(GetNode("f"), &err));
+ ASSERT_EQ("dependency cycle: d -> c -> b -> a -> d", err);
+}
+
void PlanTest::TestPoolWithDepthOne(const char* test_case) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, test_case));
GetNode("out1")->MarkDirty();
@@ -458,8 +495,8 @@ struct BuildTest : public StateTestWithBuiltinRules, public BuildLogUser {
/// State of command_runner_ and logs contents (if specified) ARE MODIFIED.
/// Handy to check for NOOP builds, and higher-level rebuild tests.
void RebuildTarget(const string& target, const char* manifest,
- const char* log_path = NULL,
- const char* deps_path = NULL);
+ const char* log_path = NULL, const char* deps_path = NULL,
+ State* state = NULL);
// Mark a path dirty.
void Dirty(const string& path);
@@ -479,10 +516,13 @@ struct BuildTest : public StateTestWithBuiltinRules, public BuildLogUser {
};
void BuildTest::RebuildTarget(const string& target, const char* manifest,
- const char* log_path, const char* deps_path) {
- State state;
- ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
- AssertParse(&state, manifest);
+ const char* log_path, const char* deps_path,
+ State* state) {
+ State local_state, *pstate = &local_state;
+ if (state)
+ pstate = state;
+ ASSERT_NO_FATAL_FAILURE(AddCatRule(pstate));
+ AssertParse(pstate, manifest);
string err;
BuildLog build_log, *pbuild_log = NULL;
@@ -495,13 +535,13 @@ void BuildTest::RebuildTarget(const string& target, const char* manifest,
DepsLog deps_log, *pdeps_log = NULL;
if (deps_path) {
- ASSERT_TRUE(deps_log.Load(deps_path, &state, &err));
+ ASSERT_TRUE(deps_log.Load(deps_path, pstate, &err));
ASSERT_TRUE(deps_log.OpenForWrite(deps_path, &err));
ASSERT_EQ("", err);
pdeps_log = &deps_log;
}
- Builder builder(&state, config_, pbuild_log, pdeps_log, &fs_);
+ Builder builder(pstate, config_, pbuild_log, pdeps_log, &fs_);
EXPECT_TRUE(builder.AddTarget(target, &err));
command_runner_.commands_ran_.clear();
@@ -816,8 +856,7 @@ TEST_F(BuildTest, DepFileParseError) {
fs_.Create("foo.c", "");
fs_.Create("foo.o.d", "randomtext\n");
EXPECT_FALSE(builder_.AddTarget("foo.o", &err));
- EXPECT_EQ("expected depfile 'foo.o.d' to mention 'foo.o', got 'randomtext'",
- err);
+ EXPECT_EQ("foo.o.d: expected ':' in depfile", err);
}
TEST_F(BuildTest, OrderOnlyDeps) {
@@ -1056,6 +1095,31 @@ TEST_F(BuildTest, SwallowFailuresLimit) {
ASSERT_EQ("cannot make progress due to previous errors", err);
}
+TEST_F(BuildTest, PoolEdgesReadyButNotWanted) {
+ fs_.Create("x", "");
+
+ const char* manifest =
+ "pool some_pool\n"
+ " depth = 4\n"
+ "rule touch\n"
+ " command = touch $out\n"
+ " pool = some_pool\n"
+ "rule cc\n"
+ " command = touch grit\n"
+ "\n"
+ "build B.d.stamp: cc | x\n"
+ "build C.stamp: touch B.d.stamp\n"
+ "build final.stamp: touch || C.stamp\n";
+
+ RebuildTarget("final.stamp", manifest);
+
+ fs_.RemoveFile("B.d.stamp");
+
+ State save_state;
+ RebuildTarget("final.stamp", manifest, NULL, NULL, &save_state);
+ EXPECT_GE(save_state.LookupPool("some_pool")->current_use(), 0);
+}
+
struct BuildWithLogTest : public BuildTest {
BuildWithLogTest() {
builder_.SetBuildLog(&build_log_);
@@ -1414,7 +1478,7 @@ TEST_F(BuildTest, RspFileFailure) {
ASSERT_EQ("Another very long command", fs_.files_["out.rsp"].contents);
}
-// Test that contens of the RSP file behaves like a regular part of
+// Test that contents of the RSP file behaves like a regular part of
// command line, i.e. triggers a rebuild if changed
TEST_F(BuildWithLogTest, RspFileCmdLineChange) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
@@ -1484,7 +1548,7 @@ TEST_F(BuildTest, InterruptCleanup) {
EXPECT_FALSE(builder_.Build(&err));
EXPECT_EQ("interrupted by user", err);
builder_.Cleanup();
- EXPECT_GT(fs_.Stat("out1"), 0);
+ EXPECT_GT(fs_.Stat("out1", &err), 0);
err = "";
// A touched output of an interrupted command should be deleted.
@@ -1493,7 +1557,22 @@ TEST_F(BuildTest, InterruptCleanup) {
EXPECT_FALSE(builder_.Build(&err));
EXPECT_EQ("interrupted by user", err);
builder_.Cleanup();
- EXPECT_EQ(0, fs_.Stat("out2"));
+ EXPECT_EQ(0, fs_.Stat("out2", &err));
+}
+
+TEST_F(BuildTest, StatFailureAbortsBuild) {
+ const string kTooLongToStat(400, 'i');
+ ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+("build " + kTooLongToStat + ": cat " + kTooLongToStat + "\n").c_str()));
+ // Also cyclic, for good measure.
+
+ // This simulates a stat failure:
+ fs_.files_[kTooLongToStat].mtime = -1;
+ fs_.files_[kTooLongToStat].stat_error = "stat failed";
+
+ string err;
+ EXPECT_FALSE(builder_.AddTarget(kTooLongToStat, &err));
+ EXPECT_EQ("stat failed", err);
}
TEST_F(BuildTest, PhonyWithNoInputs) {
@@ -1613,7 +1692,7 @@ TEST_F(BuildWithDepsLogTest, Straightforward) {
EXPECT_EQ("", err);
// The deps file should have been removed.
- EXPECT_EQ(0, fs_.Stat("in1.d"));
+ EXPECT_EQ(0, fs_.Stat("in1.d", &err));
// Recreate it for the next step.
fs_.Create("in1.d", "out: in2");
deps_log.Close();
@@ -1693,7 +1772,7 @@ TEST_F(BuildWithDepsLogTest, ObsoleteDeps) {
fs_.Create("out", "");
// The deps file should have been removed, so no need to timestamp it.
- EXPECT_EQ(0, fs_.Stat("in1.d"));
+ EXPECT_EQ(0, fs_.Stat("in1.d", &err));
{
State state;
@@ -2043,6 +2122,23 @@ TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) {
ASSERT_EQ(0u, command_runner_.commands_ran_.size());
}
+TEST_F(BuildTest, WrongOutputInDepfileCausesRebuild) {
+ string err;
+ const char* manifest =
+"rule cc\n"
+" command = cc $in\n"
+" depfile = $out.d\n"
+"build foo.o: cc foo.c\n";
+
+ fs_.Create("foo.c", "");
+ fs_.Create("foo.o", "");
+ fs_.Create("header.h", "");
+ fs_.Create("foo.o.d", "bar.o.d: header.h\n");
+
+ RebuildTarget("foo.o", manifest, "build_log", "ninja_deps");
+ ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
TEST_F(BuildTest, Console) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"rule console\n"
diff --git a/ninja/src/clean.cc b/ninja/src/clean.cc
index 98c638cc74f..1d6ba9e9679 100644
--- a/ninja/src/clean.cc
+++ b/ninja/src/clean.cc
@@ -49,7 +49,11 @@ int Cleaner::RemoveFile(const string& path) {
}
bool Cleaner::FileExists(const string& path) {
- return disk_interface_->Stat(path) > 0;
+ string err;
+ TimeStamp mtime = disk_interface_->Stat(path, &err);
+ if (mtime == -1)
+ Error("%s", err.c_str());
+ return mtime > 0; // Treat Stat() errors as "file does not exist".
}
void Cleaner::Report(const string& path) {
@@ -220,7 +224,7 @@ int Cleaner::CleanRule(const char* rule) {
assert(rule);
Reset();
- const Rule* r = state_->LookupRule(rule);
+ const Rule* r = state_->bindings_.LookupRule(rule);
if (r) {
CleanRule(r);
} else {
@@ -237,7 +241,7 @@ int Cleaner::CleanRules(int rule_count, char* rules[]) {
PrintHeader();
for (int i = 0; i < rule_count; ++i) {
const char* rule_name = rules[i];
- const Rule* rule = state_->LookupRule(rule_name);
+ const Rule* rule = state_->bindings_.LookupRule(rule_name);
if (rule) {
if (IsVerbose())
printf("Rule %s\n", rule_name);
diff --git a/ninja/src/clean_test.cc b/ninja/src/clean_test.cc
index 5869bbb4a7a..395343b314f 100644
--- a/ninja/src/clean_test.cc
+++ b/ninja/src/clean_test.cc
@@ -44,10 +44,11 @@ TEST_F(CleanTest, CleanAll) {
EXPECT_EQ(4u, fs_.files_removed_.size());
// Check they are removed.
- EXPECT_EQ(0, fs_.Stat("in1"));
- EXPECT_EQ(0, fs_.Stat("out1"));
- EXPECT_EQ(0, fs_.Stat("in2"));
- EXPECT_EQ(0, fs_.Stat("out2"));
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_EQ(0, fs_.Stat("in2", &err));
+ EXPECT_EQ(0, fs_.Stat("out2", &err));
fs_.files_removed_.clear();
EXPECT_EQ(0, cleaner.CleanAll());
@@ -75,10 +76,11 @@ TEST_F(CleanTest, CleanAllDryRun) {
EXPECT_EQ(0u, fs_.files_removed_.size());
// Check they are not removed.
- EXPECT_NE(0, fs_.Stat("in1"));
- EXPECT_NE(0, fs_.Stat("out1"));
- EXPECT_NE(0, fs_.Stat("in2"));
- EXPECT_NE(0, fs_.Stat("out2"));
+ string err;
+ EXPECT_LT(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
fs_.files_removed_.clear();
EXPECT_EQ(0, cleaner.CleanAll());
@@ -105,10 +107,11 @@ TEST_F(CleanTest, CleanTarget) {
EXPECT_EQ(2u, fs_.files_removed_.size());
// Check they are removed.
- EXPECT_EQ(0, fs_.Stat("in1"));
- EXPECT_EQ(0, fs_.Stat("out1"));
- EXPECT_NE(0, fs_.Stat("in2"));
- EXPECT_NE(0, fs_.Stat("out2"));
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
fs_.files_removed_.clear();
ASSERT_EQ(0, cleaner.CleanTarget("out1"));
@@ -135,11 +138,12 @@ TEST_F(CleanTest, CleanTargetDryRun) {
EXPECT_EQ(2, cleaner.cleaned_files_count());
EXPECT_EQ(0u, fs_.files_removed_.size());
- // Check they are removed.
- EXPECT_NE(0, fs_.Stat("in1"));
- EXPECT_NE(0, fs_.Stat("out1"));
- EXPECT_NE(0, fs_.Stat("in2"));
- EXPECT_NE(0, fs_.Stat("out2"));
+ // Check they are not removed.
+ string err;
+ EXPECT_LT(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
fs_.files_removed_.clear();
ASSERT_EQ(0, cleaner.CleanTarget("out1"));
@@ -168,10 +172,11 @@ TEST_F(CleanTest, CleanRule) {
EXPECT_EQ(2u, fs_.files_removed_.size());
// Check they are removed.
- EXPECT_EQ(0, fs_.Stat("in1"));
- EXPECT_NE(0, fs_.Stat("out1"));
- EXPECT_EQ(0, fs_.Stat("in2"));
- EXPECT_NE(0, fs_.Stat("out2"));
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_EQ(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
fs_.files_removed_.clear();
ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
@@ -200,11 +205,12 @@ TEST_F(CleanTest, CleanRuleDryRun) {
EXPECT_EQ(2, cleaner.cleaned_files_count());
EXPECT_EQ(0u, fs_.files_removed_.size());
- // Check they are removed.
- EXPECT_NE(0, fs_.Stat("in1"));
- EXPECT_NE(0, fs_.Stat("out1"));
- EXPECT_NE(0, fs_.Stat("in2"));
- EXPECT_NE(0, fs_.Stat("out2"));
+ // Check they are not removed.
+ string err;
+ EXPECT_LT(0, fs_.Stat("in1", &err));
+ EXPECT_LT(0, fs_.Stat("out1", &err));
+ EXPECT_LT(0, fs_.Stat("in2", &err));
+ EXPECT_LT(0, fs_.Stat("out2", &err));
fs_.files_removed_.clear();
ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
@@ -328,12 +334,13 @@ TEST_F(CleanTest, CleanRsp) {
EXPECT_EQ(6u, fs_.files_removed_.size());
// Check they are removed.
- EXPECT_EQ(0, fs_.Stat("in1"));
- EXPECT_EQ(0, fs_.Stat("out1"));
- EXPECT_EQ(0, fs_.Stat("in2"));
- EXPECT_EQ(0, fs_.Stat("out2"));
- EXPECT_EQ(0, fs_.Stat("in2.rsp"));
- EXPECT_EQ(0, fs_.Stat("out2.rsp"));
+ string err;
+ EXPECT_EQ(0, fs_.Stat("in1", &err));
+ EXPECT_EQ(0, fs_.Stat("out1", &err));
+ EXPECT_EQ(0, fs_.Stat("in2", &err));
+ EXPECT_EQ(0, fs_.Stat("out2", &err));
+ EXPECT_EQ(0, fs_.Stat("in2.rsp", &err));
+ EXPECT_EQ(0, fs_.Stat("out2.rsp", &err));
}
TEST_F(CleanTest, CleanFailure) {
@@ -345,6 +352,7 @@ TEST_F(CleanTest, CleanFailure) {
}
TEST_F(CleanTest, CleanPhony) {
+ string err;
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build phony: phony t1 t2\n"
"build t1: cat\n"
@@ -358,7 +366,7 @@ TEST_F(CleanTest, CleanPhony) {
Cleaner cleaner(&state_, config_, &fs_);
EXPECT_EQ(0, cleaner.CleanAll());
EXPECT_EQ(2, cleaner.cleaned_files_count());
- EXPECT_NE(0, fs_.Stat("phony"));
+ EXPECT_LT(0, fs_.Stat("phony", &err));
fs_.Create("t1", "");
fs_.Create("t2", "");
@@ -366,7 +374,7 @@ TEST_F(CleanTest, CleanPhony) {
// Check that CleanTarget does not remove "phony".
EXPECT_EQ(0, cleaner.CleanTarget("phony"));
EXPECT_EQ(2, cleaner.cleaned_files_count());
- EXPECT_NE(0, fs_.Stat("phony"));
+ EXPECT_LT(0, fs_.Stat("phony", &err));
}
TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) {
@@ -391,8 +399,9 @@ TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) {
EXPECT_EQ(4, cleaner.cleaned_files_count());
EXPECT_EQ(4u, fs_.files_removed_.size());
- EXPECT_EQ(0, fs_.Stat("out 1"));
- EXPECT_EQ(0, fs_.Stat("out 2"));
- EXPECT_EQ(0, fs_.Stat("out 1.d"));
- EXPECT_EQ(0, fs_.Stat("out 2.rsp"));
+ string err;
+ EXPECT_EQ(0, fs_.Stat("out 1", &err));
+ EXPECT_EQ(0, fs_.Stat("out 2", &err));
+ EXPECT_EQ(0, fs_.Stat("out 1.d", &err));
+ EXPECT_EQ(0, fs_.Stat("out 2.rsp", &err));
}
diff --git a/ninja/src/depfile_parser.cc b/ninja/src/depfile_parser.cc
index 4ca3943ff35..7268f31f370 100644
--- a/ninja/src/depfile_parser.cc
+++ b/ninja/src/depfile_parser.cc
@@ -230,5 +230,9 @@ yy16:
return false;
}
}
+ if (parsing_targets) {
+ *err = "expected ':' in depfile";
+ return false;
+ }
return true;
}
diff --git a/ninja/src/depfile_parser.in.cc b/ninja/src/depfile_parser.in.cc
index b59baf05c93..deaee5bc996 100644
--- a/ninja/src/depfile_parser.in.cc
+++ b/ninja/src/depfile_parser.in.cc
@@ -112,5 +112,9 @@ bool DepfileParser::Parse(string* content, string* err) {
return false;
}
}
+ if (parsing_targets) {
+ *err = "expected ':' in depfile";
+ return false;
+ }
return true;
}
diff --git a/ninja/src/depfile_parser_test.cc b/ninja/src/depfile_parser_test.cc
index e67ef794cac..8b57a1e1036 100644
--- a/ninja/src/depfile_parser_test.cc
+++ b/ninja/src/depfile_parser_test.cc
@@ -106,7 +106,7 @@ TEST_F(DepfileParserTest, Escapes) {
// it through.
string err;
EXPECT_TRUE(Parse(
-"\\!\\@\\#$$\\%\\^\\&\\\\",
+"\\!\\@\\#$$\\%\\^\\&\\\\:",
&err));
ASSERT_EQ("", err);
EXPECT_EQ("\\!\\@#$\\%\\^\\&\\",
diff --git a/ninja/src/deps_log.cc b/ninja/src/deps_log.cc
index fc46497847d..89c60232b7b 100644
--- a/ninja/src/deps_log.cc
+++ b/ninja/src/deps_log.cc
@@ -239,13 +239,13 @@ bool DepsLog::Load(const string& path, State* state, string* err) {
if (buf[path_size - 1] == '\0') --path_size;
if (buf[path_size - 1] == '\0') --path_size;
if (buf[path_size - 1] == '\0') --path_size;
- StringPiece path(buf, path_size);
+ StringPiece subpath(buf, path_size);
// It is not necessary to pass in a correct slash_bits here. It will
// either be a Node that's in the manifest (in which case it will already
// have a correct slash_bits that GetNode will look up), or it is an
// implicit dependency from a .d which does not affect the build command
// (and so need not have its slashes maintained).
- Node* node = state->GetNode(path, 0);
+ Node* node = state->GetNode(subpath, 0);
// Check that the expected index matches the actual index. This can only
// happen if two ninja processes write to the same deps log concurrently.
@@ -275,7 +275,7 @@ bool DepsLog::Load(const string& path, State* state, string* err) {
}
fclose(f);
- if (!Truncate(path.c_str(), offset, err))
+ if (!Truncate(path, offset, err))
return false;
// The truncate succeeded; we'll just report the load error as a
@@ -307,8 +307,6 @@ DepsLog::Deps* DepsLog::GetDeps(Node* node) {
bool DepsLog::Recompact(const string& path, string* err) {
METRIC_RECORD(".ninja_deps recompact");
- if (!quiet_)
- printf("Recompacting deps...\n");
Close();
string temp_path = path + ".recompact";
diff --git a/ninja/src/deps_log.h b/ninja/src/deps_log.h
index 9b81bc1565c..cec0257ceff 100644
--- a/ninja/src/deps_log.h
+++ b/ninja/src/deps_log.h
@@ -64,7 +64,7 @@ struct State;
/// wins, allowing updates to just be appended to the file. A separate
/// repacking step can run occasionally to remove dead records.
struct DepsLog {
- DepsLog() : needs_recompaction_(false), quiet_(false), file_(NULL) {}
+ DepsLog() : needs_recompaction_(false), file_(NULL) {}
~DepsLog();
// Writing (build-time) interface.
@@ -87,7 +87,6 @@ struct DepsLog {
/// Rewrite the known log entries, throwing away old data.
bool Recompact(const string& path, string* err);
- void set_quiet(bool quiet) { quiet_ = quiet; }
/// Returns if the deps entry for a node is still reachable from the manifest.
///
@@ -109,7 +108,6 @@ struct DepsLog {
bool RecordId(Node* node);
bool needs_recompaction_;
- bool quiet_;
FILE* file_;
/// Maps id -> Node.
diff --git a/ninja/src/deps_log_test.cc b/ninja/src/deps_log_test.cc
index ac2b315f037..cab06fba1f2 100644
--- a/ninja/src/deps_log_test.cc
+++ b/ninja/src/deps_log_test.cc
@@ -252,7 +252,6 @@ TEST_F(DepsLogTest, Recompact) {
ASSERT_EQ("foo.h", deps->nodes[0]->path());
ASSERT_EQ("baz.h", deps->nodes[1]->path());
- log.set_quiet(true);
ASSERT_TRUE(log.Recompact(kTestFilename, &err));
// The in-memory deps graph should still be valid after recompaction.
@@ -302,7 +301,6 @@ TEST_F(DepsLogTest, Recompact) {
ASSERT_EQ("foo.h", deps->nodes[0]->path());
ASSERT_EQ("baz.h", deps->nodes[1]->path());
- log.set_quiet(true);
ASSERT_TRUE(log.Recompact(kTestFilename, &err));
// The previous entries should have been removed.
diff --git a/ninja/src/disk_interface.cc b/ninja/src/disk_interface.cc
index 9810708b1e0..70282c056cf 100644
--- a/ninja/src/disk_interface.cc
+++ b/ninja/src/disk_interface.cc
@@ -23,6 +23,7 @@
#include <sys/types.h>
#ifdef _WIN32
+#include <sstream>
#include <windows.h>
#include <direct.h> // _mkdir
#endif
@@ -67,16 +68,13 @@ TimeStamp TimeStampFromFileTime(const FILETIME& filetime) {
return (TimeStamp)mtime;
}
-TimeStamp StatSingleFile(const string& path, bool quiet) {
+TimeStamp StatSingleFile(const string& path, string* err) {
WIN32_FILE_ATTRIBUTE_DATA attrs;
if (!GetFileAttributesEx(path.c_str(), GetFileExInfoStandard, &attrs)) {
- DWORD err = GetLastError();
- if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND)
+ DWORD win_err = GetLastError();
+ if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
return 0;
- if (!quiet) {
- Error("GetFileAttributesEx(%s): %s", path.c_str(),
- GetLastErrorString().c_str());
- }
+ *err = "GetFileAttributesEx(" + path + "): " + GetLastErrorString();
return -1;
}
return TimeStampFromFileTime(attrs.ftLastWriteTime);
@@ -98,7 +96,7 @@ bool IsWindows7OrLater() {
#endif
bool StatAllFilesInDir(const string& dir, map<string, TimeStamp>* stamps,
- bool quiet) {
+ string* err) {
// FindExInfoBasic is 30% faster than FindExInfoStandard.
static bool can_use_basic_info = IsWindows7OrLater();
// This is not in earlier SDKs.
@@ -111,13 +109,10 @@ bool StatAllFilesInDir(const string& dir, map<string, TimeStamp>* stamps,
FindExSearchNameMatch, NULL, 0);
if (find_handle == INVALID_HANDLE_VALUE) {
- DWORD err = GetLastError();
- if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND)
+ DWORD win_err = GetLastError();
+ if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
return true;
- if (!quiet) {
- Error("FindFirstFileExA(%s): %s", dir.c_str(),
- GetLastErrorString().c_str());
- }
+ *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString();
return false;
}
do {
@@ -139,9 +134,12 @@ bool DiskInterface::MakeDirs(const string& path) {
string dir = DirName(path);
if (dir.empty())
return true; // Reached root; assume it's there.
- TimeStamp mtime = Stat(dir);
- if (mtime < 0)
- return false; // Error.
+ string err;
+ TimeStamp mtime = Stat(dir, &err);
+ if (mtime < 0) {
+ Error("%s", err.c_str());
+ return false;
+ }
if (mtime > 0)
return true; // Exists already; we're done.
@@ -154,19 +152,19 @@ bool DiskInterface::MakeDirs(const string& path) {
// RealDiskInterface -----------------------------------------------------------
-TimeStamp RealDiskInterface::Stat(const string& path) const {
+TimeStamp RealDiskInterface::Stat(const string& path, string* err) const {
#ifdef _WIN32
// MSDN: "Naming Files, Paths, and Namespaces"
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) {
- if (!quiet_) {
- Error("Stat(%s): Filename longer than %i characters",
- path.c_str(), MAX_PATH);
- }
+ ostringstream err_stream;
+ err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH
+ << " characters";
+ *err = err_stream.str();
return -1;
}
if (!use_cache_)
- return StatSingleFile(path, quiet_);
+ return StatSingleFile(path, err);
string dir = DirName(path);
string base(path.substr(dir.size() ? dir.size() + 1 : 0));
@@ -177,7 +175,7 @@ TimeStamp RealDiskInterface::Stat(const string& path) const {
Cache::iterator ci = cache_.find(dir);
if (ci == cache_.end()) {
ci = cache_.insert(make_pair(dir, DirCache())).first;
- if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, quiet_)) {
+ if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, err)) {
cache_.erase(ci);
return -1;
}
@@ -189,9 +187,7 @@ TimeStamp RealDiskInterface::Stat(const string& path) const {
if (stat(path.c_str(), &st) < 0) {
if (errno == ENOENT || errno == ENOTDIR)
return 0;
- if (!quiet_) {
- Error("stat(%s): %s", path.c_str(), strerror(errno));
- }
+ *err = "stat(" + path + "): " + strerror(errno);
return -1;
}
return st.st_mtime;
diff --git a/ninja/src/disk_interface.h b/ninja/src/disk_interface.h
index a13bced2cc9..b61d192251d 100644
--- a/ninja/src/disk_interface.h
+++ b/ninja/src/disk_interface.h
@@ -30,7 +30,7 @@ struct DiskInterface {
/// stat() a file, returning the mtime, or 0 if missing and -1 on
/// other errors.
- virtual TimeStamp Stat(const string& path) const = 0;
+ virtual TimeStamp Stat(const string& path, string* err) const = 0;
/// Create a directory, returning false on failure.
virtual bool MakeDir(const string& path) = 0;
@@ -56,21 +56,18 @@ struct DiskInterface {
/// Implementation of DiskInterface that actually hits the disk.
struct RealDiskInterface : public DiskInterface {
- RealDiskInterface() : quiet_(false)
+ RealDiskInterface()
#ifdef _WIN32
- , use_cache_(false)
+ : use_cache_(false)
#endif
{}
virtual ~RealDiskInterface() {}
- virtual TimeStamp Stat(const string& path) const;
+ virtual TimeStamp Stat(const string& path, string* err) const;
virtual bool MakeDir(const string& path);
virtual bool WriteFile(const string& path, const string& contents);
virtual string ReadFile(const string& path, string* err);
virtual int RemoveFile(const string& path);
- /// Whether to print on errors. Used to make a test quieter.
- bool quiet_;
-
/// Whether stat information can be cached. Only has an effect on Windows.
void AllowStatCache(bool allow);
diff --git a/ninja/src/disk_interface_test.cc b/ninja/src/disk_interface_test.cc
index 05d509c00df..9d210b4114a 100644
--- a/ninja/src/disk_interface_test.cc
+++ b/ninja/src/disk_interface_test.cc
@@ -47,49 +47,64 @@ struct DiskInterfaceTest : public testing::Test {
};
TEST_F(DiskInterfaceTest, StatMissingFile) {
- EXPECT_EQ(0, disk_.Stat("nosuchfile"));
+ string err;
+ EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
+ EXPECT_EQ("", err);
// On Windows, the errno for a file in a nonexistent directory
// is different.
- EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile"));
+ EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
+ EXPECT_EQ("", err);
// On POSIX systems, the errno is different if a component of the
// path prefix is not a directory.
ASSERT_TRUE(Touch("notadir"));
- EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile"));
+ EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err));
+ EXPECT_EQ("", err);
}
TEST_F(DiskInterfaceTest, StatBadPath) {
- disk_.quiet_ = true;
+ string err;
#ifdef _WIN32
string bad_path("cc:\\foo");
- EXPECT_EQ(-1, disk_.Stat(bad_path));
+ EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+ EXPECT_NE("", err);
#else
string too_long_name(512, 'x');
- EXPECT_EQ(-1, disk_.Stat(too_long_name));
+ EXPECT_EQ(-1, disk_.Stat(too_long_name, &err));
+ EXPECT_NE("", err);
#endif
- disk_.quiet_ = false;
}
TEST_F(DiskInterfaceTest, StatExistingFile) {
+ string err;
ASSERT_TRUE(Touch("file"));
- EXPECT_GT(disk_.Stat("file"), 1);
+ EXPECT_GT(disk_.Stat("file", &err), 1);
+ EXPECT_EQ("", err);
}
TEST_F(DiskInterfaceTest, StatExistingDir) {
+ string err;
ASSERT_TRUE(disk_.MakeDir("subdir"));
ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir"));
- EXPECT_GT(disk_.Stat("."), 1);
- EXPECT_GT(disk_.Stat("subdir"), 1);
- EXPECT_GT(disk_.Stat("subdir/subsubdir"), 1);
+ EXPECT_GT(disk_.Stat(".", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
+ EXPECT_EQ("", err);
- EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/."));
- EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/subsubdir/.."));
- EXPECT_EQ(disk_.Stat("subdir/subsubdir"), disk_.Stat("subdir/subsubdir/."));
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/.", &err));
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/subsubdir/..", &err));
+ EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
+ disk_.Stat("subdir/subsubdir/.", &err));
}
#ifdef _WIN32
TEST_F(DiskInterfaceTest, StatCache) {
+ string err;
disk_.AllowStatCache(true);
ASSERT_TRUE(Touch("file1"));
@@ -100,27 +115,43 @@ TEST_F(DiskInterfaceTest, StatCache) {
ASSERT_TRUE(Touch("subdir\\SUBFILE2"));
ASSERT_TRUE(Touch("subdir\\SUBFILE3"));
- EXPECT_GT(disk_.Stat("FIle1"), 1);
- EXPECT_GT(disk_.Stat("file1"), 1);
+ EXPECT_GT(disk_.Stat("FIle1", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("file1", &err), 1);
+ EXPECT_EQ("", err);
- EXPECT_GT(disk_.Stat("subdir/subfile2"), 1);
- EXPECT_GT(disk_.Stat("sUbdir\\suBFile1"), 1);
+ EXPECT_GT(disk_.Stat("subdir/subfile2", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("sUbdir\\suBFile1", &err), 1);
+ EXPECT_EQ("", err);
- EXPECT_GT(disk_.Stat("."), 1);
- EXPECT_GT(disk_.Stat("subdir"), 1);
- EXPECT_GT(disk_.Stat("subdir/subsubdir"), 1);
+ EXPECT_GT(disk_.Stat(".", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir", &err), 1);
+ EXPECT_EQ("", err);
+ EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
+ EXPECT_EQ("", err);
- EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/."));
- EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/subsubdir/.."));
- EXPECT_EQ(disk_.Stat("subdir/subsubdir"), disk_.Stat("subdir/subsubdir/."));
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/.", &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(disk_.Stat("subdir", &err),
+ disk_.Stat("subdir/subsubdir/..", &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
+ disk_.Stat("subdir/subsubdir/.", &err));
+ EXPECT_EQ("", err);
// Test error cases.
- disk_.quiet_ = true;
string bad_path("cc:\\foo");
- EXPECT_EQ(-1, disk_.Stat(bad_path));
- EXPECT_EQ(-1, disk_.Stat(bad_path));
- EXPECT_EQ(0, disk_.Stat("nosuchfile"));
- EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile"));
+ EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+ EXPECT_NE("", err); err.clear();
+ EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+ EXPECT_NE("", err); err.clear();
+ EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
+ EXPECT_EQ("", err);
+ EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
+ EXPECT_EQ("", err);
}
#endif
@@ -168,7 +199,7 @@ struct StatTest : public StateTestWithBuiltinRules,
StatTest() : scan_(&state_, NULL, NULL, this) {}
// DiskInterface implementation.
- virtual TimeStamp Stat(const string& path) const;
+ virtual TimeStamp Stat(const string& path, string* err) const;
virtual bool WriteFile(const string& path, const string& contents) {
assert(false);
return true;
@@ -191,7 +222,7 @@ struct StatTest : public StateTestWithBuiltinRules,
mutable vector<string> stats_;
};
-TimeStamp StatTest::Stat(const string& path) const {
+TimeStamp StatTest::Stat(const string& path, string* err) const {
stats_.push_back(path);
map<string, TimeStamp>::const_iterator i = mtimes_.find(path);
if (i == mtimes_.end())
@@ -204,7 +235,9 @@ TEST_F(StatTest, Simple) {
"build out: cat in\n"));
Node* out = GetNode("out");
- out->Stat(this);
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
ASSERT_EQ(1u, stats_.size());
scan_.RecomputeDirty(out->in_edge(), NULL);
ASSERT_EQ(2u, stats_.size());
@@ -218,7 +251,9 @@ TEST_F(StatTest, TwoStep) {
"build mid: cat in\n"));
Node* out = GetNode("out");
- out->Stat(this);
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
ASSERT_EQ(1u, stats_.size());
scan_.RecomputeDirty(out->in_edge(), NULL);
ASSERT_EQ(3u, stats_.size());
@@ -236,7 +271,9 @@ TEST_F(StatTest, Tree) {
"build mid2: cat in21 in22\n"));
Node* out = GetNode("out");
- out->Stat(this);
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
ASSERT_EQ(1u, stats_.size());
scan_.RecomputeDirty(out->in_edge(), NULL);
ASSERT_EQ(1u + 6u, stats_.size());
@@ -255,7 +292,9 @@ TEST_F(StatTest, Middle) {
mtimes_["out"] = 1;
Node* out = GetNode("out");
- out->Stat(this);
+ string err;
+ EXPECT_TRUE(out->Stat(this, &err));
+ EXPECT_EQ("", err);
ASSERT_EQ(1u, stats_.size());
scan_.RecomputeDirty(out->in_edge(), NULL);
ASSERT_FALSE(GetNode("in")->dirty());
diff --git a/ninja/src/edit_distance.cc b/ninja/src/edit_distance.cc
index 9553c6ea89d..a6719d36a37 100644
--- a/ninja/src/edit_distance.cc
+++ b/ninja/src/edit_distance.cc
@@ -28,7 +28,7 @@ int EditDistance(const StringPiece& s1,
// http://en.wikipedia.org/wiki/Levenshtein_distance
//
// Although the algorithm is typically described using an m x n
- // array, only two rows are used at a time, so this implemenation
+ // array, only two rows are used at a time, so this implementation
// just keeps two separate vectors for those two rows.
int m = s1.len_;
int n = s2.len_;
diff --git a/ninja/src/eval_env.cc b/ninja/src/eval_env.cc
index 834b7e17c86..e991d210a11 100644
--- a/ninja/src/eval_env.cc
+++ b/ninja/src/eval_env.cc
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <assert.h>
+
#include "eval_env.h"
string BindingEnv::LookupVariable(const string& var) {
@@ -27,6 +29,55 @@ void BindingEnv::AddBinding(const string& key, const string& val) {
bindings_[key] = val;
}
+void BindingEnv::AddRule(const Rule* rule) {
+ assert(LookupRuleCurrentScope(rule->name()) == NULL);
+ rules_[rule->name()] = rule;
+}
+
+const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) {
+ map<string, const Rule*>::iterator i = rules_.find(rule_name);
+ if (i == rules_.end())
+ return NULL;
+ return i->second;
+}
+
+const Rule* BindingEnv::LookupRule(const string& rule_name) {
+ map<string, const Rule*>::iterator i = rules_.find(rule_name);
+ if (i != rules_.end())
+ return i->second;
+ if (parent_)
+ return parent_->LookupRule(rule_name);
+ return NULL;
+}
+
+void Rule::AddBinding(const string& key, const EvalString& val) {
+ bindings_[key] = val;
+}
+
+const EvalString* Rule::GetBinding(const string& key) const {
+ map<string, EvalString>::const_iterator i = bindings_.find(key);
+ if (i == bindings_.end())
+ return NULL;
+ return &i->second;
+}
+
+// static
+bool Rule::IsReservedBinding(const string& var) {
+ return var == "command" ||
+ var == "depfile" ||
+ var == "description" ||
+ var == "deps" ||
+ var == "generator" ||
+ var == "pool" ||
+ var == "restat" ||
+ var == "rspfile" ||
+ var == "rspfile_content";
+}
+
+const map<string, const Rule*>& BindingEnv::GetRules() const {
+ return rules_;
+}
+
string BindingEnv::LookupWithFallback(const string& var,
const EvalString* eval,
Env* env) {
diff --git a/ninja/src/eval_env.h b/ninja/src/eval_env.h
index f3c959acbae..28c4d1679e8 100644
--- a/ninja/src/eval_env.h
+++ b/ninja/src/eval_env.h
@@ -22,7 +22,7 @@ using namespace std;
#include "string_piece.h"
-struct EvalString;
+struct Rule;
/// An interface for a scope for variable (e.g. "$foo") lookups.
struct Env {
@@ -30,15 +30,62 @@ struct Env {
virtual string LookupVariable(const string& var) = 0;
};
+/// A tokenized string that contains variable references.
+/// Can be evaluated relative to an Env.
+struct EvalString {
+ string Evaluate(Env* env) const;
+
+ void Clear() { parsed_.clear(); }
+ bool empty() const { return parsed_.empty(); }
+
+ void AddText(StringPiece text);
+ void AddSpecial(StringPiece text);
+
+ /// Construct a human-readable representation of the parsed state,
+ /// for use in tests.
+ string Serialize() const;
+
+private:
+ enum TokenType { RAW, SPECIAL };
+ typedef vector<pair<string, TokenType> > TokenList;
+ TokenList parsed_;
+};
+
+/// An invokable build command and associated metadata (description, etc.).
+struct Rule {
+ explicit Rule(const string& name) : name_(name) {}
+
+ const string& name() const { return name_; }
+
+ typedef map<string, EvalString> Bindings;
+ void AddBinding(const string& key, const EvalString& val);
+
+ static bool IsReservedBinding(const string& var);
+
+ const EvalString* GetBinding(const string& key) const;
+
+ private:
+ // Allow the parsers to reach into this object and fill out its fields.
+ friend struct ManifestParser;
+
+ string name_;
+ map<string, EvalString> bindings_;
+};
+
/// An Env which contains a mapping of variables to values
/// as well as a pointer to a parent scope.
struct BindingEnv : public Env {
BindingEnv() : parent_(NULL) {}
- explicit BindingEnv(Env* parent) : parent_(parent) {}
+ explicit BindingEnv(BindingEnv* parent) : parent_(parent) {}
virtual ~BindingEnv() {}
virtual string LookupVariable(const string& var);
+ void AddRule(const Rule* rule);
+ const Rule* LookupRule(const string& rule_name);
+ const Rule* LookupRuleCurrentScope(const string& rule_name);
+ const map<string, const Rule*>& GetRules() const;
+
void AddBinding(const string& key, const string& val);
/// This is tricky. Edges want lookup scope to go in this order:
@@ -51,28 +98,8 @@ struct BindingEnv : public Env {
private:
map<string, string> bindings_;
- Env* parent_;
-};
-
-/// A tokenized string that contains variable references.
-/// Can be evaluated relative to an Env.
-struct EvalString {
- string Evaluate(Env* env) const;
-
- void Clear() { parsed_.clear(); }
- bool empty() const { return parsed_.empty(); }
-
- void AddText(StringPiece text);
- void AddSpecial(StringPiece text);
-
- /// Construct a human-readable representation of the parsed state,
- /// for use in tests.
- string Serialize() const;
-
-private:
- enum TokenType { RAW, SPECIAL };
- typedef vector<pair<string, TokenType> > TokenList;
- TokenList parsed_;
+ map<string, const Rule*> rules_;
+ BindingEnv* parent_;
};
#endif // NINJA_EVAL_ENV_H_
diff --git a/ninja/src/graph.cc b/ninja/src/graph.cc
index 2829669e79c..355285ca17f 100644
--- a/ninja/src/graph.cc
+++ b/ninja/src/graph.cc
@@ -27,34 +27,9 @@
#include "state.h"
#include "util.h"
-bool Node::Stat(DiskInterface* disk_interface) {
+bool Node::Stat(DiskInterface* disk_interface, string* err) {
METRIC_RECORD("node stat");
- mtime_ = disk_interface->Stat(path_);
- return mtime_ > 0;
-}
-
-void Rule::AddBinding(const string& key, const EvalString& val) {
- bindings_[key] = val;
-}
-
-const EvalString* Rule::GetBinding(const string& key) const {
- map<string, EvalString>::const_iterator i = bindings_.find(key);
- if (i == bindings_.end())
- return NULL;
- return &i->second;
-}
-
-// static
-bool Rule::IsReservedBinding(const string& var) {
- return var == "command" ||
- var == "depfile" ||
- var == "description" ||
- var == "deps" ||
- var == "generator" ||
- var == "pool" ||
- var == "restat" ||
- var == "rspfile" ||
- var == "rspfile_content";
+ return (mtime_ = disk_interface->Stat(path_, err)) != -1;
}
bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
@@ -62,10 +37,25 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
edge->outputs_ready_ = true;
edge->deps_missing_ = false;
+ // RecomputeDirty() recursively walks the graph following the input nodes
+ // of |edge| and the in_edges of these nodes. It uses the stat state of each
+ // node to mark nodes as visited and doesn't traverse across nodes that have
+ // been visited already. To make sure that every edge is visited only once
+ // (important because an edge's deps are loaded every time it's visited), mark
+ // all outputs of |edge| visited as a first step. This ensures that edges
+ // with multiple inputs and outputs are visited only once, even in cyclic
+ // graphs.
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ if (!(*o)->StatIfNecessary(disk_interface_, err))
+ return false;
+ }
+
if (!dep_loader_.LoadDeps(edge, err)) {
if (!err->empty())
return false;
// Failed to load dependency info: rebuild to regenerate it.
+ // LoadDeps() did EXPLAIN() already, no need to do it here.
dirty = edge->deps_missing_ = true;
}
@@ -73,7 +63,9 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
Node* most_recent_input = NULL;
for (vector<Node*>::iterator i = edge->inputs_.begin();
i != edge->inputs_.end(); ++i) {
- if ((*i)->StatIfNecessary(disk_interface_)) {
+ if (!(*i)->status_known()) {
+ if (!(*i)->StatIfNecessary(disk_interface_, err))
+ return false;
if (Edge* in_edge = (*i)->in_edge()) {
if (!RecomputeDirty(in_edge, err))
return false;
@@ -108,15 +100,14 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
// We may also be dirty due to output state: missing outputs, out of
// date outputs, etc. Visit all outputs and determine whether they're dirty.
if (!dirty)
- dirty = RecomputeOutputsDirty(edge, most_recent_input);
+ if (!RecomputeOutputsDirty(edge, most_recent_input, &dirty, err))
+ return false;
- // Finally, visit each output to mark off that we've visited it, and update
- // their dirty state if necessary.
- for (vector<Node*>::iterator i = edge->outputs_.begin();
- i != edge->outputs_.end(); ++i) {
- (*i)->StatIfNecessary(disk_interface_);
+  // Finally, visit each output and update its dirty state if necessary.
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
if (dirty)
- (*i)->MarkDirty();
+ (*o)->MarkDirty();
}
// If an edge is dirty, its outputs are normally not ready. (It's
@@ -130,16 +121,19 @@ bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
return true;
}
-bool DependencyScan::RecomputeOutputsDirty(Edge* edge,
- Node* most_recent_input) {
- string command = edge->EvaluateCommand(true);
- for (vector<Node*>::iterator i = edge->outputs_.begin();
- i != edge->outputs_.end(); ++i) {
- (*i)->StatIfNecessary(disk_interface_);
- if (RecomputeOutputDirty(edge, most_recent_input, command, *i))
+bool DependencyScan::RecomputeOutputsDirty(Edge* edge, Node* most_recent_input,
+ bool* outputs_dirty, string* err) {
+ string command = edge->EvaluateCommand(/*incl_rsp_file=*/true);
+ for (vector<Node*>::iterator o = edge->outputs_.begin();
+ o != edge->outputs_.end(); ++o) {
+ if (!(*o)->StatIfNecessary(disk_interface_, err))
+ return false;
+ if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) {
+ *outputs_dirty = true;
return true;
+ }
}
- return false;
+ return true;
}
bool DependencyScan::RecomputeOutputDirty(Edge* edge,
@@ -149,7 +143,12 @@ bool DependencyScan::RecomputeOutputDirty(Edge* edge,
if (edge->is_phony()) {
// Phony edges don't write any output. Outputs are only dirty if
// there are no inputs and we're missing the output.
- return edge->inputs_.empty() && !output->exists();
+ if (edge->inputs_.empty() && !output->exists()) {
+ EXPLAIN("output %s of phony edge with no inputs doesn't exist",
+ output->path().c_str());
+ return true;
+ }
+ return false;
}
BuildLog::LogEntry* entry = 0;
@@ -217,8 +216,8 @@ bool Edge::AllInputsReady() const {
struct EdgeEnv : public Env {
enum EscapeKind { kShellEscape, kDoNotEscape };
- explicit EdgeEnv(Edge* edge, EscapeKind escape)
- : edge_(edge), escape_in_out_(escape) {}
+ EdgeEnv(Edge* edge, EscapeKind escape)
+ : edge_(edge), escape_in_out_(escape), recursive_(false) {}
virtual string LookupVariable(const string& var);
/// Given a span of Nodes, construct a list of paths suitable for a command
@@ -227,8 +226,11 @@ struct EdgeEnv : public Env {
vector<Node*>::iterator end,
char sep);
+ private:
+ vector<string> lookups_;
Edge* edge_;
EscapeKind escape_in_out_;
+ bool recursive_;
};
string EdgeEnv::LookupVariable(const string& var) {
@@ -244,8 +246,25 @@ string EdgeEnv::LookupVariable(const string& var) {
' ');
}
+ if (recursive_) {
+ vector<string>::const_iterator it;
+ if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) {
+ string cycle;
+ for (; it != lookups_.end(); ++it)
+ cycle.append(*it + " -> ");
+ cycle.append(var);
+ Fatal(("cycle in rule variables: " + cycle).c_str());
+ }
+ }
+
// See notes on BindingEnv::LookupWithFallback.
const EvalString* eval = edge_->rule_->GetBinding(var);
+ if (recursive_ && eval)
+ lookups_.push_back(var);
+
+ // In practice, variables defined on rules never use another rule variable.
+ // For performance, only start checking for cycles after the first lookup.
+ recursive_ = true;
return edge_->env_->LookupWithFallback(var, eval, this);
}
@@ -398,12 +417,13 @@ bool ImplicitDepLoader::LoadDepFile(Edge* edge, const string& path,
&depfile.out_.len_, &unused, err))
return false;
- // Check that this depfile matches the edge's output.
+  // Check that this depfile matches the edge's output; if it doesn't, return
+  // false to mark the edge as dirty.
Node* first_output = edge->outputs_[0];
StringPiece opath = StringPiece(first_output->path());
if (opath != depfile.out_) {
- *err = "expected depfile '" + path + "' to mention '" +
- first_output->path() + "', got '" + depfile.out_.AsString() + "'";
+ EXPLAIN("expected depfile '%s' to mention '%s', got '%s'", path.c_str(),
+ first_output->path().c_str(), depfile.out_.AsString().c_str());
return false;
}
@@ -439,7 +459,7 @@ bool ImplicitDepLoader::LoadDepsFromLog(Edge* edge, string* err) {
// Deps are invalid if the output is newer than the deps.
if (output->mtime() > deps->mtime) {
- EXPLAIN("stored deps info out of date for for '%s' (%d vs %d)",
+ EXPLAIN("stored deps info out of date for '%s' (%d vs %d)",
output->path().c_str(), deps->mtime, output->mtime());
return false;
}
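
EdgeEnv now records which rule variables are being expanded and aborts with the whole chain when a lookup repeats, instead of recursing forever. A simplified, self-contained sketch of that check (toy expansion only, not the real EvalString machinery):

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Sketch: keep a list of variables currently being expanded; if a lookup
    // hits one of them again, report the chain (e.g. command -> flags -> command)
    // instead of recursing forever.
    static std::map<std::string, std::string> bindings;  // var -> value or "$var"
    static std::vector<std::string> lookups;

    static std::string Expand(const std::string& var) {
      std::vector<std::string>::iterator it =
          std::find(lookups.begin(), lookups.end(), var);
      if (it != lookups.end()) {
        std::string cycle;
        for (; it != lookups.end(); ++it)
          cycle += *it + " -> ";
        printf("cycle in rule variables: %s%s\n", cycle.c_str(), var.c_str());
        return "";
      }
      lookups.push_back(var);
      std::string val = bindings[var];
      // Treat a value of the form "$name" as a reference to another variable.
      return (val.size() > 1 && val[0] == '$') ? Expand(val.substr(1)) : val;
    }

    int main() {
      bindings["command"] = "$flags";
      bindings["flags"] = "$command";  // Deliberate cycle.
      Expand("command");  // Prints: cycle in rule variables: command -> flags -> command
      return 0;
    }
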
diff --git a/ninja/src/graph.h b/ninja/src/graph.h
index 9aada38562e..5f8d41ac4f4 100644
--- a/ninja/src/graph.h
+++ b/ninja/src/graph.h
@@ -41,15 +41,14 @@ struct Node {
in_edge_(NULL),
id_(-1) {}
- /// Return true if the file exists (mtime_ got a value).
- bool Stat(DiskInterface* disk_interface);
+ /// Return false on error.
+ bool Stat(DiskInterface* disk_interface, string* err);
- /// Return true if we needed to stat.
- bool StatIfNecessary(DiskInterface* disk_interface) {
+ /// Return false on error.
+ bool StatIfNecessary(DiskInterface* disk_interface, string* err) {
if (status_known())
- return false;
- Stat(disk_interface);
- return true;
+ return true;
+ return Stat(disk_interface, err);
}
/// Mark as not-yet-stat()ed and not dirty.
@@ -121,30 +120,10 @@ private:
int id_;
};
-/// An invokable build command and associated metadata (description, etc.).
-struct Rule {
- explicit Rule(const string& name) : name_(name) {}
-
- const string& name() const { return name_; }
-
- typedef map<string, EvalString> Bindings;
- void AddBinding(const string& key, const EvalString& val);
-
- static bool IsReservedBinding(const string& var);
-
- const EvalString* GetBinding(const string& key) const;
-
- private:
- // Allow the parsers to reach into this object and fill out its fields.
- friend struct ManifestParser;
-
- string name_;
- map<string, EvalString> bindings_;
-};
-
/// An edge in the dependency graph; links between Nodes using Rules.
struct Edge {
- Edge() : rule_(NULL), env_(NULL), outputs_ready_(false), deps_missing_(false),
+ Edge() : rule_(NULL), pool_(NULL), env_(NULL),
+ outputs_ready_(false), deps_missing_(false),
implicit_deps_(0), order_only_deps_(0) {}
/// Return true if all inputs' in-edges are ready.
@@ -257,9 +236,10 @@ struct DependencyScan {
/// Returns false on failure.
bool RecomputeDirty(Edge* edge, string* err);
- /// Recompute whether any output of the edge is dirty.
- /// Returns true if so.
- bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input);
+ /// Recompute whether any output of the edge is dirty, if so sets |*dirty|.
+ /// Returns false on failure.
+ bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input,
+ bool* dirty, string* err);
BuildLog* build_log() const {
return build_log_;
diff --git a/ninja/src/graph_test.cc b/ninja/src/graph_test.cc
index 382d352defe..e41f5cc2e7c 100644
--- a/ninja/src/graph_test.cc
+++ b/ninja/src/graph_test.cc
@@ -252,6 +252,81 @@ TEST_F(GraphTest, NestedPhonyPrintsDone) {
ASSERT_FALSE(plan_.more_to_do());
}
+// Verify that cycles in graphs with multiple outputs are handled correctly
+// in RecomputeDirty() and don't cause deps to be loaded multiple times.
+TEST_F(GraphTest, CycleWithLengthZeroFromDepfile) {
+ AssertParse(&state_,
+"rule deprule\n"
+" depfile = dep.d\n"
+" command = unused\n"
+"build a b: deprule\n"
+ );
+ fs_.Create("dep.d", "a: b\n");
+
+ string err;
+ Edge* edge = GetNode("a")->in_edge();
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+ // Despite the depfile causing edge to be a cycle (it has outputs a and b,
+ // but the depfile also adds b as an input), the deps should have been loaded
+ // only once:
+ EXPECT_EQ(1, edge->inputs_.size());
+ EXPECT_EQ("b", edge->inputs_[0]->path());
+}
+
+// Like CycleWithLengthZeroFromDepfile but with a higher cycle length.
+TEST_F(GraphTest, CycleWithLengthOneFromDepfile) {
+ AssertParse(&state_,
+"rule deprule\n"
+" depfile = dep.d\n"
+" command = unused\n"
+"rule r\n"
+" command = unused\n"
+"build a b: deprule\n"
+"build c: r b\n"
+ );
+ fs_.Create("dep.d", "a: c\n");
+
+ string err;
+ Edge* edge = GetNode("a")->in_edge();
+ EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+ ASSERT_EQ("", err);
+
+  // Despite the depfile making |edge| part of a cycle (|edge| has outputs a
+  // and b, c's in_edge has b as an input, and the depfile adds c as an input
+  // of |edge|), the deps should have been loaded only once:
+ EXPECT_EQ(1, edge->inputs_.size());
+ EXPECT_EQ("c", edge->inputs_[0]->path());
+}
+
+// Like CycleWithLengthOneFromDepfile but building a node one hop away from
+// the cycle.
+TEST_F(GraphTest, CycleWithLengthOneFromDepfileOneHopAway) {
+ AssertParse(&state_,
+"rule deprule\n"
+" depfile = dep.d\n"
+" command = unused\n"
+"rule r\n"
+" command = unused\n"
+"build a b: deprule\n"
+"build c: r b\n"
+"build d: r a\n"
+ );
+ fs_.Create("dep.d", "a: c\n");
+
+ string err;
+ EXPECT_TRUE(scan_.RecomputeDirty(GetNode("d")->in_edge(), &err));
+ ASSERT_EQ("", err);
+
+  // Despite the depfile making |edge| part of a cycle (|edge| has outputs a
+  // and b, c's in_edge has b as an input, and the depfile adds c as an input
+  // of |edge|), the deps should have been loaded only once:
+ Edge* edge = GetNode("a")->in_edge();
+ EXPECT_EQ(1, edge->inputs_.size());
+ EXPECT_EQ("c", edge->inputs_[0]->path());
+}
+
#ifdef _WIN32
TEST_F(GraphTest, Decanonicalize) {
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
diff --git a/ninja/src/graphviz.cc b/ninja/src/graphviz.cc
index 8354a2277a6..dce8b322563 100644
--- a/ninja/src/graphviz.cc
+++ b/ninja/src/graphviz.cc
@@ -15,6 +15,7 @@
#include "graphviz.h"
#include <stdio.h>
+#include <algorithm>
#include "graph.h"
@@ -22,7 +23,9 @@ void GraphViz::AddTarget(Node* node) {
if (visited_nodes_.find(node) != visited_nodes_.end())
return;
- printf("\"%p\" [label=\"%s\"]\n", node, node->path().c_str());
+ string pathstr = node->path();
+ replace(pathstr.begin(), pathstr.end(), '\\', '/');
+ printf("\"%p\" [label=\"%s\"]\n", node, pathstr.c_str());
visited_nodes_.insert(node);
Edge* edge = node->in_edge();
diff --git a/ninja/src/hash_collision_bench.cc b/ninja/src/hash_collision_bench.cc
index d0eabde6ddf..5be0531d7a0 100644
--- a/ninja/src/hash_collision_bench.cc
+++ b/ninja/src/hash_collision_bench.cc
@@ -46,7 +46,7 @@ int main() {
sort(hashes, hashes + N);
- int num_collisions = 0;
+ int collision_count = 0;
for (int i = 1; i < N; ++i) {
if (hashes[i - 1].first == hashes[i].first) {
if (strcmp(commands[hashes[i - 1].second],
@@ -54,9 +54,9 @@ int main() {
printf("collision!\n string 1: '%s'\n string 2: '%s'\n",
commands[hashes[i - 1].second],
commands[hashes[i].second]);
- num_collisions++;
+ collision_count++;
}
}
}
- printf("\n\n%d collisions after %d runs\n", num_collisions, N);
+ printf("\n\n%d collisions after %d runs\n", collision_count, N);
}
diff --git a/ninja/src/hash_map.h b/ninja/src/hash_map.h
index 77e75865901..abdba92db21 100644
--- a/ninja/src/hash_map.h
+++ b/ninja/src/hash_map.h
@@ -50,7 +50,22 @@ unsigned int MurmurHash2(const void* key, size_t len) {
return h;
}
-#ifdef _MSC_VER
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
+#include <unordered_map>
+
+namespace std {
+template<>
+struct hash<StringPiece> {
+ typedef StringPiece argument_type;
+ typedef size_t result_type;
+
+ size_t operator()(StringPiece key) const {
+ return MurmurHash2(key.str_, key.len_);
+ }
+};
+}
+
+#elif defined(_MSC_VER)
#include <hash_map>
using stdext::hash_map;
@@ -73,26 +88,17 @@ struct StringPieceCmp : public hash_compare<StringPiece> {
};
#else
-
#include <ext/hash_map>
using __gnu_cxx::hash_map;
namespace __gnu_cxx {
template<>
-struct hash<std::string> {
- size_t operator()(const std::string& s) const {
- return hash<const char*>()(s.c_str());
- }
-};
-
-template<>
struct hash<StringPiece> {
size_t operator()(StringPiece key) const {
return MurmurHash2(key.str_, key.len_);
}
};
-
}
#endif
@@ -102,7 +108,9 @@ struct hash<StringPiece> {
/// mapping StringPiece => Foo*.
template<typename V>
struct ExternalStringHashMap {
-#ifdef _MSC_VER
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
+ typedef std::unordered_map<StringPiece, V> Type;
+#elif defined(_MSC_VER)
typedef hash_map<StringPiece, V, StringPieceCmp> Type;
#else
typedef hash_map<StringPiece, V> Type;
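
The new first branch keys std::unordered_map on StringPiece by specializing std::hash with MurmurHash2. The same pattern for any user-defined key type, as a hedged sketch (a trivial hash stands in for MurmurHash2 here):

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Sketch: make a custom key type usable in std::unordered_map by
    // specializing std::hash for it.
    struct Key {
      std::string s;
      bool operator==(const Key& other) const { return s == other.s; }
    };

    namespace std {
    template <>
    struct hash<Key> {
      size_t operator()(const Key& k) const { return hash<string>()(k.s); }
    };
    }  // namespace std

    int main() {
      std::unordered_map<Key, int> mtimes;
      mtimes[Key{"out.o"}] = 42;
      printf("%d\n", mtimes[Key{"out.o"}]);  // 42
      return 0;
    }
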
diff --git a/ninja/src/line_printer.cc b/ninja/src/line_printer.cc
index 813f63eb5cd..2cd3e174c49 100644
--- a/ninja/src/line_printer.cc
+++ b/ninja/src/line_printer.cc
@@ -50,29 +50,21 @@ void LinePrinter::Print(string to_print, LineType type) {
return;
}
-#ifdef _WIN32
- CONSOLE_SCREEN_BUFFER_INFO csbi;
- GetConsoleScreenBufferInfo(console_, &csbi);
-#endif
-
if (smart_terminal_) {
-#ifndef _WIN32
printf("\r"); // Print over previous line, if any.
-#else
- csbi.dwCursorPosition.X = 0;
- SetConsoleCursorPosition(console_, csbi.dwCursorPosition);
-#endif
+    // On Windows, calling a C library function that writes to stdout also
+    // handles pausing the executable when the "Pause" key or Ctrl-S is pressed.
}
if (smart_terminal_ && type == ELIDE) {
#ifdef _WIN32
- // Don't use the full width or console will move to next line.
- size_t width = static_cast<size_t>(csbi.dwSize.X) - 1;
- to_print = ElideMiddle(to_print, width);
- // We don't want to have the cursor spamming back and forth, so
- // use WriteConsoleOutput instead which updates the contents of
- // the buffer, but doesn't move the cursor position.
+ CONSOLE_SCREEN_BUFFER_INFO csbi;
GetConsoleScreenBufferInfo(console_, &csbi);
+
+ to_print = ElideMiddle(to_print, static_cast<size_t>(csbi.dwSize.X));
+ // We don't want to have the cursor spamming back and forth, so instead of
+ // printf use WriteConsoleOutput which updates the contents of the buffer,
+ // but doesn't move the cursor position.
COORD buf_size = { csbi.dwSize.X, 1 };
COORD zero_zero = { 0, 0 };
SMALL_RECT target = {
@@ -80,16 +72,12 @@ void LinePrinter::Print(string to_print, LineType type) {
static_cast<SHORT>(csbi.dwCursorPosition.X + csbi.dwSize.X - 1),
csbi.dwCursorPosition.Y
};
- CHAR_INFO* char_data = new CHAR_INFO[csbi.dwSize.X];
- memset(char_data, 0, sizeof(CHAR_INFO) * csbi.dwSize.X);
- for (int i = 0; i < csbi.dwSize.X; ++i) {
- char_data[i].Char.AsciiChar = ' ';
+ vector<CHAR_INFO> char_data(csbi.dwSize.X);
+ for (size_t i = 0; i < static_cast<size_t>(csbi.dwSize.X); ++i) {
+ char_data[i].Char.AsciiChar = i < to_print.size() ? to_print[i] : ' ';
char_data[i].Attributes = csbi.wAttributes;
}
- for (size_t i = 0; i < to_print.size(); ++i)
- char_data[i].Char.AsciiChar = to_print[i];
- WriteConsoleOutput(console_, char_data, buf_size, zero_zero, &target);
- delete[] char_data;
+ WriteConsoleOutput(console_, &char_data[0], buf_size, zero_zero, &target);
#else
// Limit output to width of the terminal if provided so we don't cause
// line-wrapping.
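
On the non-Windows path the elided status line is simply printed over the previous one with a carriage return. A small sketch of that overprinting behaviour (the ElideMiddle here is illustrative; ninja's own helper lives in util.cc and may differ):

    #include <unistd.h>
    #include <cstdio>
    #include <string>

    // Sketch: elide the middle of a long status message to a fixed width and
    // print it over the previous line; only persistent messages get a newline.
    static std::string ElideMiddle(const std::string& str, size_t width) {
      if (str.size() <= width)
        return str;
      size_t margin = (width - 3) / 2;  // Leave room for "...".
      return str.substr(0, margin) + "..." + str.substr(str.size() - margin);
    }

    int main() {
      const char* steps[] = { "[1/3] CXX src/build.cc",
                              "[2/3] CXX src/graph.cc",
                              "[3/3] LINK ninja" };
      for (int i = 0; i < 3; ++i) {
        printf("\r%s\x1B[K", ElideMiddle(steps[i], 40).c_str());  // \x1B[K clears the line tail.
        fflush(stdout);
        usleep(200000);  // Simulate work so the overprint is visible.
      }
      printf("\n");
      return 0;
    }
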
diff --git a/ninja/src/manifest_parser.cc b/ninja/src/manifest_parser.cc
index 388b5bc679e..e8c0436041e 100644
--- a/ninja/src/manifest_parser.cc
+++ b/ninja/src/manifest_parser.cc
@@ -24,8 +24,10 @@
#include "util.h"
#include "version.h"
-ManifestParser::ManifestParser(State* state, FileReader* file_reader)
- : state_(state), file_reader_(file_reader) {
+ManifestParser::ManifestParser(State* state, FileReader* file_reader,
+ bool dupe_edge_should_err)
+ : state_(state), file_reader_(file_reader),
+ dupe_edge_should_err_(dupe_edge_should_err), quiet_(false) {
env_ = &state->bindings_;
}
@@ -156,7 +158,7 @@ bool ManifestParser::ParseRule(string* err) {
if (!ExpectToken(Lexer::NEWLINE, err))
return false;
- if (state_->LookupRule(name) != NULL)
+ if (env_->LookupRuleCurrentScope(name) != NULL)
return lexer_.Error("duplicate rule '" + name + "'", err);
Rule* rule = new Rule(name); // XXX scoped_ptr
@@ -185,7 +187,7 @@ bool ManifestParser::ParseRule(string* err) {
if (rule->bindings_["command"].empty())
return lexer_.Error("expected 'command =' line", err);
- state_->AddRule(rule);
+ env_->AddRule(rule);
return true;
}
@@ -252,7 +254,7 @@ bool ManifestParser::ParseEdge(string* err) {
if (!lexer_.ReadIdent(&rule_name))
return lexer_.Error("expected build command name", err);
- const Rule* rule = state_->LookupRule(rule_name);
+ const Rule* rule = env_->LookupRule(rule_name);
if (!rule)
return lexer_.Error("unknown build rule '" + rule_name + "'", err);
@@ -298,16 +300,16 @@ bool ManifestParser::ParseEdge(string* err) {
return false;
// Bindings on edges are rare, so allocate per-edge envs only when needed.
- bool hasIdent = lexer_.PeekToken(Lexer::INDENT);
- BindingEnv* env = hasIdent ? new BindingEnv(env_) : env_;
- while (hasIdent) {
+ bool has_indent_token = lexer_.PeekToken(Lexer::INDENT);
+ BindingEnv* env = has_indent_token ? new BindingEnv(env_) : env_;
+ while (has_indent_token) {
string key;
EvalString val;
if (!ParseLet(&key, &val, err))
return false;
env->AddBinding(key, val.Evaluate(env_));
- hasIdent = lexer_.PeekToken(Lexer::INDENT);
+ has_indent_token = lexer_.PeekToken(Lexer::INDENT);
}
Edge* edge = state_->AddEdge(rule);
@@ -321,21 +323,42 @@ bool ManifestParser::ParseEdge(string* err) {
edge->pool_ = pool;
}
- for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
+ edge->outputs_.reserve(outs.size());
+ for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
string path = i->Evaluate(env);
string path_err;
unsigned int slash_bits;
if (!CanonicalizePath(&path, &slash_bits, &path_err))
return lexer_.Error(path_err, err);
- state_->AddIn(edge, path, slash_bits);
+ if (!state_->AddOut(edge, path, slash_bits)) {
+ if (dupe_edge_should_err_) {
+ lexer_.Error("multiple rules generate " + path + " [-w dupbuild=err]",
+ err);
+ return false;
+ } else if (!quiet_) {
+ Warning("multiple rules generate %s. "
+ "builds involving this target will not be correct; "
+ "continuing anyway [-w dupbuild=warn]",
+ path.c_str());
+ }
+ }
}
- for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
+ if (edge->outputs_.empty()) {
+ // All outputs of the edge are already created by other edges. Don't add
+ // this edge. Do this check before input nodes are connected to the edge.
+ state_->edges_.pop_back();
+ delete edge;
+ return true;
+ }
+
+ edge->inputs_.reserve(ins.size());
+ for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
string path = i->Evaluate(env);
string path_err;
unsigned int slash_bits;
if (!CanonicalizePath(&path, &slash_bits, &path_err))
return lexer_.Error(path_err, err);
- state_->AddOut(edge, path, slash_bits);
+ state_->AddIn(edge, path, slash_bits);
}
edge->implicit_deps_ = implicit;
edge->order_only_deps_ = order_only;
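
ParseEdge() now evaluates outputs before inputs so a duplicate output can be diagnosed before anything is wired up: with -w dupbuild=err it fails the parse, otherwise it warns and skips the duplicate, and an edge whose outputs were all claimed already is dropped entirely. A standalone sketch of that policy (illustrative names only):

    #include <cstdio>
    #include <set>
    #include <string>

    // Sketch: the first edge to claim an output wins; later claims either fail
    // the parse (dupbuild=err) or only produce a warning (dupbuild=warn).
    static std::set<std::string> claimed_outputs;

    // Returns false if parsing should stop with an error.
    static bool AddOutput(const std::string& path, bool dupe_edge_should_err) {
      if (claimed_outputs.insert(path).second)
        return true;  // First claim of this output: fine.
      if (dupe_edge_should_err) {
        fprintf(stderr, "multiple rules generate %s [-w dupbuild=err]\n",
                path.c_str());
        return false;
      }
      fprintf(stderr, "warning: multiple rules generate %s; continuing anyway\n",
              path.c_str());
      return true;
    }

    int main() {
      AddOutput("out1", /*dupe_edge_should_err=*/false);               // first claim
      AddOutput("out1", /*dupe_edge_should_err=*/false);               // warns
      return AddOutput("out1", /*dupe_edge_should_err=*/true) ? 0 : 1; // errors
    }
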
diff --git a/ninja/src/manifest_parser.h b/ninja/src/manifest_parser.h
index 5212f72ba1b..f72cd6f932c 100644
--- a/ninja/src/manifest_parser.h
+++ b/ninja/src/manifest_parser.h
@@ -32,13 +32,15 @@ struct ManifestParser {
virtual bool ReadFile(const string& path, string* content, string* err) = 0;
};
- ManifestParser(State* state, FileReader* file_reader);
+ ManifestParser(State* state, FileReader* file_reader,
+ bool dupe_edge_should_err = false);
/// Load and parse a file.
- bool Load(const string& filename, string* err, Lexer* parent=NULL);
+ bool Load(const string& filename, string* err, Lexer* parent = NULL);
/// Parse a text string of input. Used by tests.
bool ParseTest(const string& input, string* err) {
+ quiet_ = true;
return Parse("input", input, err);
}
@@ -64,6 +66,8 @@ private:
BindingEnv* env_;
FileReader* file_reader_;
Lexer lexer_;
+ bool dupe_edge_should_err_;
+ bool quiet_;
};
#endif // NINJA_MANIFEST_PARSER_H_
diff --git a/ninja/src/manifest_parser_perftest.cc b/ninja/src/manifest_parser_perftest.cc
index ca62fb27c7b..6b56ab028e4 100644
--- a/ninja/src/manifest_parser_perftest.cc
+++ b/ninja/src/manifest_parser_perftest.cc
@@ -42,15 +42,19 @@ struct RealFileReader : public ManifestParser::FileReader {
}
};
-bool WriteFakeManifests(const string& dir) {
+bool WriteFakeManifests(const string& dir, string* err) {
RealDiskInterface disk_interface;
- if (disk_interface.Stat(dir + "/build.ninja") > 0)
- return true;
+ TimeStamp mtime = disk_interface.Stat(dir + "/build.ninja", err);
+ if (mtime != 0) // 0 means that the file doesn't exist yet.
+ return mtime != -1;
+ string command = "python misc/write_fake_manifests.py " + dir;
printf("Creating manifest data..."); fflush(stdout);
- int err = system(("python misc/write_fake_manifests.py " + dir).c_str());
+ int exit_code = system(command.c_str());
printf("done.\n");
- return err == 0;
+ if (exit_code != 0)
+ *err = "Failed to run " + command;
+ return exit_code == 0;
}
int LoadManifests(bool measure_command_evaluation) {
@@ -93,8 +97,9 @@ int main(int argc, char* argv[]) {
const char kManifestDir[] = "build/manifest_perftest";
- if (!WriteFakeManifests(kManifestDir)) {
- fprintf(stderr, "Failed to write test data\n");
+ string err;
+ if (!WriteFakeManifests(kManifestDir, &err)) {
+ fprintf(stderr, "Failed to write test data: %s\n", err.c_str());
return 1;
}
diff --git a/ninja/src/manifest_parser_test.cc b/ninja/src/manifest_parser_test.cc
index 6909ea92405..8f7b57561e5 100644
--- a/ninja/src/manifest_parser_test.cc
+++ b/ninja/src/manifest_parser_test.cc
@@ -28,6 +28,7 @@ struct ParserTest : public testing::Test,
string err;
EXPECT_TRUE(parser.ParseTest(input, &err));
ASSERT_EQ("", err);
+ VerifyGraph(state);
}
virtual bool ReadFile(const string& path, string* content, string* err) {
@@ -60,8 +61,8 @@ TEST_F(ParserTest, Rules) {
"\n"
"build result: cat in_1.cc in-2.O\n"));
- ASSERT_EQ(3u, state.rules_.size());
- const Rule* rule = state.rules_.begin()->second;
+ ASSERT_EQ(3u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
EXPECT_EQ("cat", rule->name());
EXPECT_EQ("[cat ][$in][ > ][$out]",
rule->GetBinding("command")->Serialize());
@@ -93,8 +94,8 @@ TEST_F(ParserTest, IgnoreIndentedComments) {
"build result: cat in_1.cc in-2.O\n"
" #comment\n"));
- ASSERT_EQ(2u, state.rules_.size());
- const Rule* rule = state.rules_.begin()->second;
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
EXPECT_EQ("cat", rule->name());
Edge* edge = state.GetNode("result", 0)->in_edge();
EXPECT_TRUE(edge->GetBindingBool("restat"));
@@ -126,8 +127,8 @@ TEST_F(ParserTest, ResponseFiles) {
"build out: cat_rsp in\n"
" rspfile=out.rsp\n"));
- ASSERT_EQ(2u, state.rules_.size());
- const Rule* rule = state.rules_.begin()->second;
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
EXPECT_EQ("cat_rsp", rule->name());
EXPECT_EQ("[cat ][$rspfile][ > ][$out]",
rule->GetBinding("command")->Serialize());
@@ -143,8 +144,8 @@ TEST_F(ParserTest, InNewline) {
"build out: cat_rsp in in2\n"
" rspfile=out.rsp\n"));
- ASSERT_EQ(2u, state.rules_.size());
- const Rule* rule = state.rules_.begin()->second;
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
EXPECT_EQ("cat_rsp", rule->name());
EXPECT_EQ("[cat ][$in_newline][ > ][$out]",
rule->GetBinding("command")->Serialize());
@@ -204,8 +205,8 @@ TEST_F(ParserTest, Continuation) {
"build a: link c $\n"
" d e f\n"));
- ASSERT_EQ(2u, state.rules_.size());
- const Rule* rule = state.rules_.begin()->second;
+ ASSERT_EQ(2u, state.bindings_.GetRules().size());
+ const Rule* rule = state.bindings_.GetRules().begin()->second;
EXPECT_EQ("link", rule->name());
EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize());
}
@@ -340,6 +341,42 @@ TEST_F(ParserTest, CanonicalizePathsBackslashes) {
}
#endif
+TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out1 out2: cat in1\n"
+"build out1: cat in2\n"
+"build final: cat out1\n"
+));
+ // AssertParse() checks that the generated build graph is self-consistent.
+ // That's all the checking that this test needs.
+}
+
+TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out: cat in\n"
+"build out: cat in\n"
+));
+ // AssertParse() checks that the generated build graph is self-consistent.
+ // That's all the checking that this test needs.
+}
+
+TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) {
+ const char kInput[] =
+"rule cat\n"
+" command = cat $in > $out\n"
+"build out1 out2: cat in1\n"
+"build out1: cat in2\n"
+"build final: cat out1\n";
+ ManifestParser parser(&state, this, /*dupe_edges_should_err=*/true);
+ string err;
+ EXPECT_FALSE(parser.ParseTest(kInput, &err));
+ EXPECT_EQ("input:5: multiple rules generate out1 [-w dupbuild=err]\n", err);
+}
+
TEST_F(ParserTest, ReservedWords) {
ASSERT_NO_FATAL_FAILURE(AssertParse(
"rule build\n"
@@ -823,18 +860,27 @@ TEST_F(ParserTest, MissingSubNinja) {
}
TEST_F(ParserTest, DuplicateRuleInDifferentSubninjas) {
- // Test that rules live in a global namespace and aren't scoped to subninjas.
+ // Test that rules are scoped to subninjas.
files_["test.ninja"] = "rule cat\n"
" command = cat\n";
ManifestParser parser(&state, this);
string err;
- EXPECT_FALSE(parser.ParseTest("rule cat\n"
+ EXPECT_TRUE(parser.ParseTest("rule cat\n"
" command = cat\n"
"subninja test.ninja\n", &err));
- EXPECT_EQ("test.ninja:1: duplicate rule 'cat'\n"
- "rule cat\n"
- " ^ near here"
- , err);
+}
+
+TEST_F(ParserTest, DuplicateRuleInDifferentSubninjasWithInclude) {
+ // Test that rules are scoped to subninjas even with includes.
+ files_["rules.ninja"] = "rule cat\n"
+ " command = cat\n";
+ files_["test.ninja"] = "include rules.ninja\n"
+ "build x : cat\n";
+ ManifestParser parser(&state, this);
+ string err;
+ EXPECT_TRUE(parser.ParseTest("include rules.ninja\n"
+ "subninja test.ninja\n"
+ "build y : cat\n", &err));
}
TEST_F(ParserTest, Include) {
@@ -891,6 +937,16 @@ TEST_F(ParserTest, DefaultDefault) {
EXPECT_EQ("", err);
}
+TEST_F(ParserTest, DefaultDefaultCycle) {
+ ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n command = cat $in > $out\n"
+"build a: cat a\n"));
+
+ string err;
+ EXPECT_EQ(0u, state.DefaultNodes(&err).size());
+ EXPECT_EQ("could not determine root nodes of build graph", err);
+}
+
TEST_F(ParserTest, DefaultStatements) {
ASSERT_NO_FATAL_FAILURE(AssertParse(
"rule cat\n command = cat $in > $out\n"
diff --git a/ninja/src/ninja.cc b/ninja/src/ninja.cc
index 2c890c24cc6..e5bf98dddd5 100644
--- a/ninja/src/ninja.cc
+++ b/ninja/src/ninja.cc
@@ -64,6 +64,9 @@ struct Options {
/// Tool to run rather than building.
const Tool* tool;
+
+  /// Whether duplicate build statements for one target should warn or error.
+ bool dupe_edges_should_err;
};
/// The Ninja main() loads up a series of data structures; various tools need
@@ -140,6 +143,8 @@ struct NinjaMain : public BuildLogUser {
virtual bool IsPathDead(StringPiece s) const {
Node* n = state_.LookupNode(s);
+ if (!n || !n->in_edge())
+ return false;
// Just checking n isn't enough: If an old output is both in the build log
// and in the deps log, it will have a Node object in state_. (It will also
// have an in edge if one of its inputs is another output that's in the deps
@@ -149,7 +154,11 @@ struct NinjaMain : public BuildLogUser {
// which seems good enough for this corner case.)
// Do keep entries around for files which still exist on disk, for
// generators that want to use this information.
- return (!n || !n->in_edge()) && disk_interface_.Stat(s.AsString()) == 0;
+ string err;
+ TimeStamp mtime = disk_interface_.Stat(s.AsString(), &err);
+ if (mtime == -1)
+ Error("%s", err.c_str()); // Log and ignore Stat() errors.
+ return mtime == 0;
}
};
@@ -192,14 +201,15 @@ void Usage(const BuildConfig& config) {
" -f FILE specify input build file [default=build.ninja]\n"
"\n"
" -j N run N jobs in parallel [default=%d, derived from CPUs available]\n"
-" -l N do not start new jobs if the load average is greater than N\n"
" -k N keep going until N jobs fail [default=1]\n"
+" -l N do not start new jobs if the load average is greater than N\n"
" -n dry run (don't run commands but act like they succeeded)\n"
" -v show all command lines while building\n"
"\n"
" -d MODE enable debugging (use -d list to list modes)\n"
" -t TOOL run a subtool (use -t list to list subtools)\n"
-" terminates toplevel options; further flags are passed to the tool\n",
+" terminates toplevel options; further flags are passed to the tool\n"
+" -w FLAG adjust warnings (use -w list to list warnings)\n",
kNinjaVersion, config.parallelism);
}
@@ -241,12 +251,11 @@ bool NinjaMain::RebuildManifest(const char* input_file, string* err) {
if (builder.AlreadyUpToDate())
return false; // Not an error, but we didn't rebuild.
- if (!builder.Build(err))
- return false;
- // The manifest was only rebuilt if it is now dirty (it may have been cleaned
- // by a restat).
- return node->dirty();
+ // Even if the manifest was cleaned by a restat rule, claim that it was
+ // rebuilt. Not doing so can lead to crashes, see
+ // https://github.com/martine/ninja/issues/874
+ return builder.Build(err);
}
Node* NinjaMain::CollectTarget(const char* cpath, string* err) {
@@ -478,7 +487,10 @@ int NinjaMain::ToolDeps(int argc, char** argv) {
continue;
}
- TimeStamp mtime = disk_interface.Stat((*it)->path());
+ string err;
+ TimeStamp mtime = disk_interface.Stat((*it)->path(), &err);
+ if (mtime == -1)
+ Error("%s", err.c_str()); // Log and ignore Stat() errors;
printf("%s: #deps %d, deps mtime %d (%s)\n",
(*it)->path().c_str(), deps->node_count, deps->mtime,
(!mtime || mtime > deps->mtime ? "STALE":"VALID"));
@@ -793,6 +805,32 @@ bool DebugEnable(const string& name) {
}
}
+/// Set a warning flag. Returns false if Ninja should exit instead of
+/// continuing.
+bool WarningEnable(const string& name, Options* options) {
+ if (name == "list") {
+ printf("warning flags:\n"
+" dupbuild={err,warn} multiple build lines for one target\n");
+ return false;
+ } else if (name == "dupbuild=err") {
+ options->dupe_edges_should_err = true;
+ return true;
+ } else if (name == "dupbuild=warn") {
+ options->dupe_edges_should_err = false;
+ return true;
+ } else {
+ const char* suggestion =
+ SpellcheckString(name.c_str(), "dupbuild=err", "dupbuild=warn", NULL);
+ if (suggestion) {
+ Error("unknown warning flag '%s', did you mean '%s'?",
+ name.c_str(), suggestion);
+ } else {
+ Error("unknown warning flag '%s'", name.c_str());
+ }
+ return false;
+ }
+}
+
bool NinjaMain::OpenBuildLog(bool recompact_only) {
string log_path = ".ninja_log";
if (!build_dir_.empty())
@@ -963,7 +1001,7 @@ int ReadFlags(int* argc, char*** argv,
int opt;
while (!options->tool &&
- (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vC:h", kLongOptions,
+ (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vw:C:h", kLongOptions,
NULL)) != -1) {
switch (opt) {
case 'd':
@@ -1012,6 +1050,10 @@ int ReadFlags(int* argc, char*** argv,
case 'v':
config->verbosity = BuildConfig::VERBOSE;
break;
+ case 'w':
+ if (!WarningEnable(optarg, options))
+ return 1;
+ break;
case 'C':
options->working_dir = optarg;
break;
@@ -1062,13 +1104,14 @@ int real_main(int argc, char** argv) {
return (ninja.*options.tool->func)(argc, argv);
}
- // The build can take up to 2 passes: one to rebuild the manifest, then
- // another to build the desired target.
- for (int cycle = 0; cycle < 2; ++cycle) {
+ // Limit number of rebuilds, to prevent infinite loops.
+ const int kCycleLimit = 100;
+ for (int cycle = 1; cycle <= kCycleLimit; ++cycle) {
NinjaMain ninja(ninja_command, config);
RealFileReader file_reader;
- ManifestParser parser(&ninja.state_, &file_reader);
+ ManifestParser parser(&ninja.state_, &file_reader,
+ options.dupe_edges_should_err);
string err;
if (!parser.Load(options.input_file, &err)) {
Error("%s", err.c_str());
@@ -1087,16 +1130,13 @@ int real_main(int argc, char** argv) {
if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS)
return (ninja.*options.tool->func)(argc, argv);
- // The first time through, attempt to rebuild the manifest before
- // building anything else.
- if (cycle == 0) {
- if (ninja.RebuildManifest(options.input_file, &err)) {
- // Start the build over with the new manifest.
- continue;
- } else if (!err.empty()) {
- Error("rebuilding '%s': %s", options.input_file, err.c_str());
- return 1;
- }
+    // Attempt to rebuild the manifest before building anything else.
+ if (ninja.RebuildManifest(options.input_file, &err)) {
+ // Start the build over with the new manifest.
+ continue;
+ } else if (!err.empty()) {
+ Error("rebuilding '%s': %s", options.input_file, err.c_str());
+ return 1;
}
int result = ninja.RunBuild(argc, argv);
@@ -1105,7 +1145,9 @@ int real_main(int argc, char** argv) {
return result;
}
- return 1; // Shouldn't be reached.
+ Error("manifest '%s' still dirty after %d tries\n",
+ options.input_file, kCycleLimit);
+ return 1;
}
} // anonymous namespace
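
real_main() now retries the manifest rebuild up to a fixed limit instead of exactly once, so a generator that keeps dirtying build.ninja eventually produces an error rather than looping forever. A toy sketch of that loop (RebuildManifest here is a stand-in, not the NinjaMain method):

    #include <cstdio>

    static int manifest_version = 0;

    // Stand-in for the real rebuild step: pretend the generator touches the
    // manifest a couple of times before it settles down.
    static bool RebuildManifest() {
      return ++manifest_version < 3;
    }

    int main() {
      const int kCycleLimit = 100;
      for (int cycle = 1; cycle <= kCycleLimit; ++cycle) {
        if (RebuildManifest()) {
          printf("manifest changed (cycle %d), reloading\n", cycle);
          continue;  // Start over with the new manifest.
        }
        printf("manifest stable after %d cycle(s), building\n", cycle);
        return 0;
      }
      fprintf(stderr, "manifest still dirty after %d tries\n", kCycleLimit);
      return 1;
    }
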
diff --git a/ninja/src/state.cc b/ninja/src/state.cc
index 6e3e10d65a9..a70f211425b 100644
--- a/ninja/src/state.cc
+++ b/ninja/src/state.cc
@@ -61,6 +61,7 @@ void Pool::Dump() const {
}
}
+// static
bool Pool::WeightedEdgeCmp(const Edge* a, const Edge* b) {
if (!a) return b;
if (!b) return false;
@@ -73,23 +74,11 @@ Pool State::kConsolePool("console", 1);
const Rule State::kPhonyRule("phony");
State::State() {
- AddRule(&kPhonyRule);
+ bindings_.AddRule(&kPhonyRule);
AddPool(&kDefaultPool);
AddPool(&kConsolePool);
}
-void State::AddRule(const Rule* rule) {
- assert(LookupRule(rule->name()) == NULL);
- rules_[rule->name()] = rule;
-}
-
-const Rule* State::LookupRule(const string& rule_name) {
- map<string, const Rule*>::iterator i = rules_.find(rule_name);
- if (i == rules_.end())
- return NULL;
- return i->second;
-}
-
void State::AddPool(Pool* pool) {
assert(LookupPool(pool->name()) == NULL);
pools_[pool->name()] = pool;
@@ -151,16 +140,13 @@ void State::AddIn(Edge* edge, StringPiece path, unsigned int slash_bits) {
node->AddOutEdge(edge);
}
-void State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) {
+bool State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) {
Node* node = GetNode(path, slash_bits);
+ if (node->in_edge())
+ return false;
edge->outputs_.push_back(node);
- if (node->in_edge()) {
- Warning("multiple rules generate %s. "
- "builds involving this target will not be correct; "
- "continuing anyway",
- path.AsString().c_str());
- }
node->set_in_edge(edge);
+ return true;
}
bool State::AddDefault(StringPiece path, string* err) {
@@ -187,7 +173,6 @@ vector<Node*> State::RootNodes(string* err) {
if (!edges_.empty() && root_nodes.empty())
*err = "could not determine root nodes of build graph";
- assert(edges_.empty() || !root_nodes.empty());
return root_nodes;
}
diff --git a/ninja/src/state.h b/ninja/src/state.h
index 880453284c3..d7987baec35 100644
--- a/ninja/src/state.h
+++ b/ninja/src/state.h
@@ -37,13 +37,14 @@ struct Rule;
/// the total scheduled weight diminishes enough (i.e. when a scheduled edge
/// completes).
struct Pool {
- explicit Pool(const string& name, int depth)
- : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) { }
+ Pool(const string& name, int depth)
+ : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) {}
// A depth of 0 is infinite
bool is_valid() const { return depth_ >= 0; }
int depth() const { return depth_; }
const string& name() const { return name_; }
+ int current_use() const { return current_use_; }
/// true if the Pool might delay this edge
bool ShouldDelayEdge() const { return depth_ != 0; }
@@ -79,7 +80,7 @@ struct Pool {
DelayedEdges delayed_;
};
-/// Global state (file status, loaded rules) for a single run.
+/// Global state (file status) for a single run.
struct State {
static Pool kDefaultPool;
static Pool kConsolePool;
@@ -87,9 +88,6 @@ struct State {
State();
- void AddRule(const Rule* rule);
- const Rule* LookupRule(const string& rule_name);
-
void AddPool(Pool* pool);
Pool* LookupPool(const string& pool_name);
@@ -100,7 +98,7 @@ struct State {
Node* SpellcheckNode(const string& path);
void AddIn(Edge* edge, StringPiece path, unsigned int slash_bits);
- void AddOut(Edge* edge, StringPiece path, unsigned int slash_bits);
+ bool AddOut(Edge* edge, StringPiece path, unsigned int slash_bits);
bool AddDefault(StringPiece path, string* error);
/// Reset state. Keeps all nodes and edges, but restores them to the
@@ -119,9 +117,6 @@ struct State {
typedef ExternalStringHashMap<Node*>::Type Paths;
Paths paths_;
- /// All the rules used in the graph.
- map<string, const Rule*> rules_;
-
/// All the pools used in the graph.
map<string, Pool*> pools_;
diff --git a/ninja/src/state_test.cc b/ninja/src/state_test.cc
index bd133b60b17..458b5196c34 100644
--- a/ninja/src/state_test.cc
+++ b/ninja/src/state_test.cc
@@ -29,7 +29,7 @@ TEST(State, Basic) {
Rule* rule = new Rule("cat");
rule->AddBinding("command", command);
- state.AddRule(rule);
+ state.bindings_.AddRule(rule);
Edge* edge = state.AddEdge(rule);
state.AddIn(edge, "in1", 0);
diff --git a/ninja/src/subprocess-posix.cc b/ninja/src/subprocess-posix.cc
index 743e4066136..f3baec2ffa3 100644
--- a/ninja/src/subprocess-posix.cc
+++ b/ninja/src/subprocess-posix.cc
@@ -28,6 +28,7 @@
Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1),
use_console_(use_console) {
}
+
Subprocess::~Subprocess() {
if (fd_ >= 0)
close(fd_);
@@ -59,14 +60,19 @@ bool Subprocess::Start(SubprocessSet* set, const string& command) {
// Track which fd we use to report errors on.
int error_pipe = output_pipe[1];
do {
- if (sigaction(SIGINT, &set->old_act_, 0) < 0)
+ if (sigaction(SIGINT, &set->old_int_act_, 0) < 0)
+ break;
+ if (sigaction(SIGTERM, &set->old_term_act_, 0) < 0)
break;
if (sigprocmask(SIG_SETMASK, &set->old_mask_, 0) < 0)
break;
if (!use_console_) {
- // Put the child in its own process group, so ctrl-c won't reach it.
- if (setpgid(0, 0) < 0)
+ // Put the child in its own session and process group. It will be
+ // detached from the current terminal and ctrl-c won't reach it.
+ // Since this process was just forked, it is not a process group leader
+ // and setsid() will succeed.
+ if (setsid() < 0)
break;
// Open /dev/null over stdin.
@@ -130,7 +136,7 @@ ExitStatus Subprocess::Finish() {
if (exit == 0)
return ExitSuccess;
} else if (WIFSIGNALED(status)) {
- if (WTERMSIG(status) == SIGINT)
+ if (WTERMSIG(status) == SIGINT || WTERMSIG(status) == SIGTERM)
return ExitInterrupted;
}
return ExitFailure;
@@ -144,31 +150,48 @@ const string& Subprocess::GetOutput() const {
return buf_;
}
-bool SubprocessSet::interrupted_;
+int SubprocessSet::interrupted_;
void SubprocessSet::SetInterruptedFlag(int signum) {
- (void) signum;
- interrupted_ = true;
+ interrupted_ = signum;
+}
+
+void SubprocessSet::HandlePendingInterruption() {
+ sigset_t pending;
+ sigemptyset(&pending);
+ if (sigpending(&pending) == -1) {
+ perror("ninja: sigpending");
+ return;
+ }
+ if (sigismember(&pending, SIGINT))
+ interrupted_ = SIGINT;
+ else if (sigismember(&pending, SIGTERM))
+ interrupted_ = SIGTERM;
}
SubprocessSet::SubprocessSet() {
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGINT);
+ sigaddset(&set, SIGTERM);
if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0)
Fatal("sigprocmask: %s", strerror(errno));
struct sigaction act;
memset(&act, 0, sizeof(act));
act.sa_handler = SetInterruptedFlag;
- if (sigaction(SIGINT, &act, &old_act_) < 0)
+ if (sigaction(SIGINT, &act, &old_int_act_) < 0)
+ Fatal("sigaction: %s", strerror(errno));
+ if (sigaction(SIGTERM, &act, &old_term_act_) < 0)
Fatal("sigaction: %s", strerror(errno));
}
SubprocessSet::~SubprocessSet() {
Clear();
- if (sigaction(SIGINT, &old_act_, 0) < 0)
+ if (sigaction(SIGINT, &old_int_act_, 0) < 0)
+ Fatal("sigaction: %s", strerror(errno));
+ if (sigaction(SIGTERM, &old_term_act_, 0) < 0)
Fatal("sigaction: %s", strerror(errno));
if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0)
Fatal("sigprocmask: %s", strerror(errno));
@@ -199,16 +222,20 @@ bool SubprocessSet::DoWork() {
++nfds;
}
- interrupted_ = false;
+ interrupted_ = 0;
int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_);
if (ret == -1) {
if (errno != EINTR) {
perror("ninja: ppoll");
return false;
}
- return interrupted_;
+ return IsInterrupted();
}
+ HandlePendingInterruption();
+ if (IsInterrupted())
+ return true;
+
nfds_t cur_nfd = 0;
for (vector<Subprocess*>::iterator i = running_.begin();
i != running_.end(); ) {
@@ -227,7 +254,7 @@ bool SubprocessSet::DoWork() {
++i;
}
- return interrupted_;
+ return IsInterrupted();
}
#else // !defined(USE_PPOLL)
@@ -246,16 +273,20 @@ bool SubprocessSet::DoWork() {
}
}
- interrupted_ = false;
+ interrupted_ = 0;
int ret = pselect(nfds, &set, 0, 0, 0, &old_mask_);
if (ret == -1) {
if (errno != EINTR) {
perror("ninja: pselect");
return false;
}
- return interrupted_;
+ return IsInterrupted();
}
+ HandlePendingInterruption();
+ if (IsInterrupted())
+ return true;
+
for (vector<Subprocess*>::iterator i = running_.begin();
i != running_.end(); ) {
int fd = (*i)->fd_;
@@ -270,7 +301,7 @@ bool SubprocessSet::DoWork() {
++i;
}
- return interrupted_;
+ return IsInterrupted();
}
#endif // !defined(USE_PPOLL)
@@ -285,10 +316,10 @@ Subprocess* SubprocessSet::NextFinished() {
void SubprocessSet::Clear() {
for (vector<Subprocess*>::iterator i = running_.begin();
i != running_.end(); ++i)
- // Since the foreground process is in our process group, it will receive a
- // SIGINT at the same time as us.
+ // Since the foreground process is in our process group, it will receive
+ // the interruption signal (i.e. SIGINT or SIGTERM) at the same time as us.
if (!(*i)->use_console_)
- kill(-(*i)->pid_, SIGINT);
+ kill(-(*i)->pid_, interrupted_);
for (vector<Subprocess*>::iterator i = running_.begin();
i != running_.end(); ++i)
delete *i;
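
SubprocessSet now blocks both SIGINT and SIGTERM, remembers which one arrived as a signal number, and additionally checks sigpending() after ppoll()/pselect() so a signal that is still blocked isn't missed. A minimal POSIX sketch of that block/record/check pattern (not the actual SubprocessSet code):

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>
    #include <cstdio>

    // Sketch: block SIGINT/SIGTERM, record whichever one arrives in a handler,
    // and also consult sigpending() so a signal that is still blocked is seen.
    static volatile sig_atomic_t interrupted = 0;

    static void OnSignal(int signum) { interrupted = signum; }

    int main() {
      sigset_t mask, old_mask;
      sigemptyset(&mask);
      sigaddset(&mask, SIGINT);
      sigaddset(&mask, SIGTERM);
      sigprocmask(SIG_BLOCK, &mask, &old_mask);

      struct sigaction act;
      memset(&act, 0, sizeof(act));
      act.sa_handler = OnSignal;
      sigaction(SIGINT, &act, NULL);
      sigaction(SIGTERM, &act, NULL);

      kill(getpid(), SIGTERM);  // Simulate an interruption while blocked.

      sigset_t pending;
      sigemptyset(&pending);
      if (sigpending(&pending) == 0) {
        if (sigismember(&pending, SIGINT))
          interrupted = SIGINT;
        else if (sigismember(&pending, SIGTERM))
          interrupted = SIGTERM;
      }
      printf("interrupted by signal %d\n", static_cast<int>(interrupted));  // 15
      return 0;
    }
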
diff --git a/ninja/src/subprocess.h b/ninja/src/subprocess.h
index b7a1a4c048e..a001fc9e6b4 100644
--- a/ninja/src/subprocess.h
+++ b/ninja/src/subprocess.h
@@ -89,9 +89,15 @@ struct SubprocessSet {
static HANDLE ioport_;
#else
static void SetInterruptedFlag(int signum);
- static bool interrupted_;
+ static void HandlePendingInterruption();
+ /// Store the signal number that caused the interruption.
+ /// 0 if no interruption.
+ static int interrupted_;
- struct sigaction old_act_;
+ static bool IsInterrupted() { return interrupted_ != 0; }
+
+ struct sigaction old_int_act_;
+ struct sigaction old_term_act_;
sigset_t old_mask_;
#endif
};
diff --git a/ninja/src/subprocess_test.cc b/ninja/src/subprocess_test.cc
index 8a0787c987c..07cc52f6ba3 100644
--- a/ninja/src/subprocess_test.cc
+++ b/ninja/src/subprocess_test.cc
@@ -16,6 +16,8 @@
#include "test.h"
+#include <string>
+
#ifndef _WIN32
// SetWithLots need setrlimit.
#include <stdio.h>
@@ -96,12 +98,47 @@ TEST_F(SubprocessTest, InterruptParent) {
ASSERT_FALSE("We should have been interrupted");
}
+TEST_F(SubprocessTest, InterruptChildWithSigTerm) {
+ Subprocess* subproc = subprocs_.Add("kill -TERM $$");
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ subprocs_.DoWork();
+ }
+
+ EXPECT_EQ(ExitInterrupted, subproc->Finish());
+}
+
+TEST_F(SubprocessTest, InterruptParentWithSigTerm) {
+ Subprocess* subproc = subprocs_.Add("kill -TERM $PPID ; sleep 1");
+ ASSERT_NE((Subprocess *) 0, subproc);
+
+ while (!subproc->Done()) {
+ bool interrupted = subprocs_.DoWork();
+ if (interrupted)
+ return;
+ }
+
+ ASSERT_FALSE("We should have been interrupted");
+}
+
+// A shell command to check if the current process is connected to a terminal.
+// This is different from having stdin/stdout/stderr be a terminal. (For
+// instance, consider the command "yes < /dev/null > /dev/null 2>&1".
+// As "ps" will confirm, "yes" could still be connected to a terminal, despite
+// not having any of the standard file descriptors be a terminal.)
+static const char kIsConnectedToTerminal[] = "tty < /dev/tty > /dev/null";
+
TEST_F(SubprocessTest, Console) {
// Skip test if we don't have the console ourselves.
if (isatty(0) && isatty(1) && isatty(2)) {
- Subprocess* subproc = subprocs_.Add("test -t 0 -a -t 1 -a -t 2",
- /*use_console=*/true);
- ASSERT_NE((Subprocess *) 0, subproc);
+ // Test that stdin, stdout and stderr are a terminal.
+ // Also check that the current process is connected to a terminal.
+ Subprocess* subproc =
+ subprocs_.Add(std::string("test -t 0 -a -t 1 -a -t 2 && ") +
+ std::string(kIsConnectedToTerminal),
+ /*use_console=*/true);
+ ASSERT_NE((Subprocess*)0, subproc);
while (!subproc->Done()) {
subprocs_.DoWork();
@@ -111,6 +148,18 @@ TEST_F(SubprocessTest, Console) {
}
}
+TEST_F(SubprocessTest, NoConsole) {
+ Subprocess* subproc =
+ subprocs_.Add(kIsConnectedToTerminal, /*use_console=*/false);
+ ASSERT_NE((Subprocess*)0, subproc);
+
+ while (!subproc->Done()) {
+ subprocs_.DoWork();
+ }
+
+ EXPECT_NE(ExitSuccess, subproc->Finish());
+}
+
#endif
TEST_F(SubprocessTest, SetWithSingle) {
@@ -177,8 +226,8 @@ TEST_F(SubprocessTest, SetWithLots) {
// Make sure [ulimit -n] isn't going to stop us from working.
rlimit rlim;
ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlim));
- if (!EXPECT_GT(rlim.rlim_cur, kNumProcs)) {
- printf("Raise [ulimit -n] well above %u to make this test go\n", kNumProcs);
+ if (rlim.rlim_cur < kNumProcs) {
+ printf("Raise [ulimit -n] well above %u (currently %lu) to make this test go\n", kNumProcs, rlim.rlim_cur);
return;
}
@@ -196,7 +245,7 @@ TEST_F(SubprocessTest, SetWithLots) {
}
ASSERT_EQ(kNumProcs, subprocs_.finished_.size());
}
-#endif // !__APPLE__ && !_WIN32
+#endif // !__APPLE__ && !_WIN32
// TODO: this test could work on Windows, just not sure how to simply
// read stdin.
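
The kIsConnectedToTerminal command used by the Console/NoConsole tests works because /dev/tty can only be opened by a process that has a controlling terminal, regardless of where its standard descriptors point. A small sketch of the same check done directly (illustration only, not part of the test suite):

    // tty_check.cc -- illustration only.
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    int main() {
      // open("/dev/tty") succeeds only if the process has a controlling
      // terminal, even when stdin/stdout/stderr are redirected elsewhere.
      int fd = open("/dev/tty", O_RDWR);
      if (fd < 0) {
        printf("not connected to a terminal\n");
        return 1;
      }
      close(fd);
      printf("connected to a terminal\n");
      return 0;
    }
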
diff --git a/ninja/src/test.cc b/ninja/src/test.cc
index f667fef0f30..aed8db767cd 100644
--- a/ninja/src/test.cc
+++ b/ninja/src/test.cc
@@ -28,6 +28,7 @@
#endif
#include "build_log.h"
+#include "graph.h"
#include "manifest_parser.h"
#include "util.h"
@@ -98,12 +99,45 @@ void AssertParse(State* state, const char* input) {
string err;
EXPECT_TRUE(parser.ParseTest(input, &err));
ASSERT_EQ("", err);
+ VerifyGraph(*state);
}
void AssertHash(const char* expected, uint64_t actual) {
ASSERT_EQ(BuildLog::LogEntry::HashCommand(expected), actual);
}
+void VerifyGraph(const State& state) {
+ for (vector<Edge*>::const_iterator e = state.edges_.begin();
+ e != state.edges_.end(); ++e) {
+ // All edges need at least one output.
+ EXPECT_FALSE((*e)->outputs_.empty());
+ // Check that the edge's inputs have the edge as out-edge.
+ for (vector<Node*>::const_iterator in_node = (*e)->inputs_.begin();
+ in_node != (*e)->inputs_.end(); ++in_node) {
+ const vector<Edge*>& out_edges = (*in_node)->out_edges();
+ EXPECT_NE(std::find(out_edges.begin(), out_edges.end(), *e),
+ out_edges.end());
+ }
+ // Check that the edge's outputs have the edge as in-edge.
+ for (vector<Node*>::const_iterator out_node = (*e)->outputs_.begin();
+ out_node != (*e)->outputs_.end(); ++out_node) {
+ EXPECT_EQ((*out_node)->in_edge(), *e);
+ }
+ }
+
+ // The union of all in- and out-edges of each node should be exactly edges_.
+ set<const Edge*> node_edge_set;
+ for (State::Paths::const_iterator p = state.paths_.begin();
+ p != state.paths_.end(); ++p) {
+ const Node* n = p->second;
+ if (n->in_edge())
+ node_edge_set.insert(n->in_edge());
+ node_edge_set.insert(n->out_edges().begin(), n->out_edges().end());
+ }
+ set<const Edge*> edge_set(state.edges_.begin(), state.edges_.end());
+ EXPECT_EQ(node_edge_set, edge_set);
+}
+
void VirtualFileSystem::Create(const string& path,
const string& contents) {
files_[path].mtime = now_;
@@ -111,10 +145,12 @@ void VirtualFileSystem::Create(const string& path,
files_created_.insert(path);
}
-TimeStamp VirtualFileSystem::Stat(const string& path) const {
+TimeStamp VirtualFileSystem::Stat(const string& path, string* err) const {
FileMap::const_iterator i = files_.find(path);
- if (i != files_.end())
+ if (i != files_.end()) {
+ *err = i->second.stat_error;
return i->second.mtime;
+ }
return 0;
}
diff --git a/ninja/src/test.h b/ninja/src/test.h
index 4c152039d7c..156e68a69ef 100644
--- a/ninja/src/test.h
+++ b/ninja/src/test.h
@@ -124,6 +124,7 @@ struct StateTestWithBuiltinRules : public testing::Test {
void AssertParse(State* state, const char* input);
void AssertHash(const char* expected, uint64_t actual);
+void VerifyGraph(const State& state);
/// An implementation of DiskInterface that uses an in-memory representation
/// of disk state. It also logs file accesses and directory creations
@@ -141,7 +142,7 @@ struct VirtualFileSystem : public DiskInterface {
}
// DiskInterface
- virtual TimeStamp Stat(const string& path) const;
+ virtual TimeStamp Stat(const string& path, string* err) const;
virtual bool WriteFile(const string& path, const string& contents);
virtual bool MakeDir(const string& path);
virtual string ReadFile(const string& path, string* err);
@@ -150,6 +151,7 @@ struct VirtualFileSystem : public DiskInterface {
/// An entry for a single in-memory file.
struct Entry {
int mtime;
+ string stat_error; // Error to return from Stat() when mtime is -1.
string contents;
};
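
The widened Stat() signature lets an implementation distinguish a missing file from a failed stat: by the convention visible in these hunks, a return of 0 means the path does not exist, while -1 means the stat itself failed and *err carries the reason. A self-contained sketch of a caller, assuming those two sentinels (FakeStat and the paths below are made up for illustration):

    // stat_sentinels.cc -- illustrates the Stat(path, err) contract only.
    #include <string>
    #include <cstdio>

    typedef int TimeStamp;  // matches the int mtime in VirtualFileSystem

    // Stand-in for DiskInterface::Stat(): >0 = mtime, 0 = missing, -1 = error.
    TimeStamp FakeStat(const std::string& path, std::string* err) {
      if (path == "missing") return 0;
      if (path == "broken") { *err = "permission denied"; return -1; }
      return 42;  // pretend mtime
    }

    void Describe(const std::string& path) {
      std::string err;
      TimeStamp mtime = FakeStat(path, &err);
      if (mtime == -1)
        fprintf(stderr, "ninja: %s\n", err.c_str());   // stat itself failed
      else if (mtime == 0)
        printf("%s is missing\n", path.c_str());
      else
        printf("%s has mtime %d\n", path.c_str(), mtime);
    }

    int main() {
      Describe("missing");
      Describe("broken");
      Describe("present");
      return 0;
    }
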
diff --git a/ninja/src/util.cc b/ninja/src/util.cc
index 746d7ed1aab..aa47f2f69bf 100644
--- a/ninja/src/util.cc
+++ b/ninja/src/util.cc
@@ -14,7 +14,10 @@
#include "util.h"
-#ifdef _WIN32
+#ifdef __CYGWIN__
+#include <windows.h>
+#include <io.h>
+#elif defined(_WIN32)
#include <windows.h>
#include <io.h>
#include <share.h>
@@ -497,7 +500,7 @@ string StripAnsiEscapeCodes(const string& in) {
int GetProcessorCount() {
#ifdef _WIN32
SYSTEM_INFO info;
- GetSystemInfo(&info);
+ GetNativeSystemInfo(&info);
return info.dwNumberOfProcessors;
#else
return sysconf(_SC_NPROCESSORS_ONLN);
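
GetNativeSystemInfo() matters when a 32-bit ninja runs under WOW64 on 64-bit Windows: GetSystemInfo() then describes the emulated 32-bit environment and can report a different (typically smaller) processor count, while the native variant describes the real machine. A minimal sketch comparing the two, assuming a Windows toolchain:

    // cpu_count.cc -- illustration only; requires a Windows toolchain.
    #ifdef _WIN32
    #include <windows.h>
    #include <stdio.h>

    int main() {
      SYSTEM_INFO emulated, native;
      GetSystemInfo(&emulated);       // what a WOW64 process is shown
      GetNativeSystemInfo(&native);   // what the machine actually has
      printf("GetSystemInfo: %lu, GetNativeSystemInfo: %lu\n",
             (unsigned long)emulated.dwNumberOfProcessors,
             (unsigned long)native.dwNumberOfProcessors);
      return 0;
    }
    #else
    int main() { return 0; }
    #endif
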
diff --git a/ninja/src/version.cc b/ninja/src/version.cc
index 2d2d9c010b8..14d43f0b94c 100644
--- a/ninja/src/version.cc
+++ b/ninja/src/version.cc
@@ -18,7 +18,7 @@
#include "util.h"
-const char* kNinjaVersion = "1.5.3";
+const char* kNinjaVersion = "1.6.0";
void ParseVersion(const string& version, int* major, int* minor) {
size_t end = version.find('.');