Diffstat (limited to 'deps/gyp/pylib/gyp/input.py')
 deps/gyp/pylib/gyp/input.py | 403
 1 file changed, 227 insertions(+), 176 deletions(-)
diff --git a/deps/gyp/pylib/gyp/input.py b/deps/gyp/pylib/gyp/input.py
index 6472912db8..dc143d9dfc 100644
--- a/deps/gyp/pylib/gyp/input.py
+++ b/deps/gyp/pylib/gyp/input.py
@@ -10,8 +10,8 @@ from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
-import copy
import gyp.common
+import gyp.simple_copy
import multiprocessing
import optparse
import os.path
@@ -24,6 +24,7 @@ import threading
import time
import traceback
from gyp.common import GypError
+from gyp.common import OrderedSet
# A list of types that are treated as linkable.
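The new import pulls in gyp.common.OrderedSet, whose implementation is not part of this diff. As a reference point, here is a minimal sketch of the behaviour the callers below rely on (insertion-ordered iteration plus O(1) membership and add); the real class in gyp/common.py may differ in detail:

```python
class OrderedSet(object):
  """Minimal sketch of the assumed gyp.common.OrderedSet interface:
  a set that iterates in insertion order with O(1) membership tests."""

  def __init__(self, iterable=()):
    self._seen = set()    # O(1) "is it already added" checks
    self._items = []      # preserves insertion order for iteration
    for item in iterable:
      self.add(item)

  def add(self, item):
    if item not in self._seen:
      self._seen.add(item)
      self._items.append(item)

  def __contains__(self, item):
    return item in self._seen

  def __iter__(self):
    return iter(self._items)

  def __len__(self):
    return len(self._items)

# list(OrderedSet(['b', 'a', 'b'])) -> ['b', 'a']
```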
@@ -45,18 +46,31 @@ base_path_sections = [
'outputs',
'sources',
]
-path_sections = []
-
-is_path_section_charset = set('=+?!')
-is_path_section_match_re = re.compile('_(dir|file|path)s?$')
+path_sections = set()
def IsPathSection(section):
- # If section ends in one of these characters, it's applied to a section
+ # If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
- while section[-1:] in is_path_section_charset:
+ while section[-1:] in '=+?!':
section = section[:-1]
- return section in path_sections or is_path_section_match_re.search(section)
+
+ if section in path_sections:
+ return True
+
+  # Sections matching the regexp '_(dir|file|path)s?$' are also
+  # considered PathSections. Manual string matching is used here
+  # instead of the regexp because it is much faster, and this function
+  # can be called hundreds of thousands of times, so micro-performance
+  # matters.
+ if "_" in section:
+ tail = section[-6:]
+ if tail[-1] == 's':
+ tail = tail[:-1]
+ if tail[-5:] in ('_file', '_path'):
+ return True
+ return tail[-4:] == '_dir'
+
+ return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
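The hand-rolled tail check above is meant to be behaviourally identical to the regexp it replaces. A quick self-check of that equivalence on a few representative section names (the helper below simply mirrors the new logic in IsPathSection):

```python
import re

_path_re = re.compile('_(dir|file|path)s?$')

def _manual_match(section):
  # Mirrors the string matching in IsPathSection above.
  if '_' in section:
    tail = section[-6:]
    if tail[-1] == 's':
      tail = tail[:-1]
    if tail[-5:] in ('_file', '_path'):
      return True
    return tail[-4:] == '_dir'
  return False

for name in ('include_dirs', 'library_dirs', 'destination_file',
             'xcode_settings', 'outputs', 'msvs_props'):
  assert bool(_path_re.search(name)) == _manual_match(name), name
```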
@@ -200,7 +214,7 @@ def CheckNode(node, keypath):
"': " + repr(node)
-def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
+def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
@@ -224,7 +238,7 @@ def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
- if not isinstance(build_file_data, dict):
+ if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
@@ -236,10 +250,10 @@ def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
- aux_data, variables, includes, check)
+ aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
- aux_data, variables, None, check)
+ aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
@@ -249,7 +263,7 @@ def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
- variables, includes, check):
+ includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
@@ -273,30 +287,27 @@ def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
- LoadOneBuildFile(include, data, aux_data, variables, None,
- False, check),
+ LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
- if v.__class__ == dict:
- LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
+ if type(v) is dict:
+ LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
- elif v.__class__ == list:
- LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
+ elif type(v) is list:
+ LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
-def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
- variables, check):
+def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
- if item.__class__ == dict:
+ if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
- variables, None, check)
- elif item.__class__ == list:
- LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data,
- variables, check)
+ None, check)
+ elif type(item) is list:
+ LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
@@ -320,7 +331,7 @@ def ProcessToolsetsInDict(data):
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
- new_target = copy.deepcopy(target)
+ new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
@@ -328,7 +339,7 @@ def ProcessToolsetsInDict(data):
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
- if isinstance(condition, list):
+ if type(condition) is list:
for condition_dict in condition[1:]:
ProcessToolsetsInDict(condition_dict)
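gyp.simple_copy, substituted for the stdlib copy module here and throughout the diff, is not shown. Presumably it is a deepcopy specialized to the plain dict/list/str/int structures GYP data consists of, skipping copy.deepcopy's generic type dispatch and memo bookkeeping. A sketch under that assumption:

```python
def deepcopy(x):
  """Sketch of the assumed gyp.simple_copy.deepcopy: a fast deep copy
  for GYP-style data, i.e. nothing but dicts, lists, strings and ints.
  The real module may handle more cases or report errors differently."""
  cls = type(x)
  if cls in (str, int):
    return x  # immutable, safe to share rather than copy
  if cls is list:
    return [deepcopy(item) for item in x]
  if cls is dict:
    return dict((key, deepcopy(value)) for key, value in x.items())
  raise TypeError('simple_copy cannot copy objects of type %s' %
                  cls.__name__)
```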
@@ -358,7 +369,7 @@ def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
- build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
+ build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
@@ -408,7 +419,8 @@ def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
- new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
+ new_target_dict = gyp.simple_copy.deepcopy(
+ build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
@@ -636,15 +648,26 @@ def FindEnclosingBracketGroup(input_str):
return (-1, -1)
-canonical_int_re = re.compile('(0|-?[1-9][0-9]*)$')
-
-
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
- return isinstance(string, str) and canonical_int_re.match(string)
+ if type(string) is str:
+    # This function is called a lot, so for maximum performance regexps
+    # are avoided here even though they would make the code much
+    # shorter. A regexp would take roughly twice as long as this
+    # function.
+ if string:
+ if string == "0":
+ return True
+ if string[0] == "-":
+ string = string[1:]
+ if not string:
+ return False
+ if '1' <= string[0] <= '9':
+ return string.isdigit()
+
+ return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
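Spot checks of the canonical-integer rule, str(int(s)) == s, against the rewritten function (run with gyp on the import path):

```python
from gyp.input import IsStrCanonicalInt

for s, expected in [('0', True), ('-1', True), ('42', True),
                    ('007', False),  # leading zeros are not canonical
                    ('-0', False),   # str(int('-0')) is '0', not '-0'
                    ('', False), ('12a', False), (42, False)]:
  assert IsStrCanonicalInt(s) == expected, (s, expected)
```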
@@ -677,7 +700,7 @@ cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
- if type(cmd) == list:
+ if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
@@ -767,7 +790,7 @@ def ExpandVariables(input, phase, variables, build_file):
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
- processed_variables = copy.deepcopy(variables)
+ processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
@@ -804,7 +827,7 @@ def ExpandVariables(input, phase, variables, build_file):
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
- if type(contents) == list:
+ if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
@@ -837,17 +860,15 @@ def ExpandVariables(input, phase, variables, build_file):
use_shell = False
# Check for a cached value to avoid executing commands, or generating
- # file lists more than once.
- # TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
- # possible that the command being invoked depends on the current
- # directory. For that case the syntax needs to be extended so that the
- # directory is also used in cache_key (it becomes a tuple).
+ # file lists more than once. The cache key contains the command to be
+ # run as well as the directory to run it from, to account for commands
+ # that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
- cache_key = str(contents)
+ cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
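The cache key changes from the bare command string to a (command, directory) tuple, so the same command invoked from two different directories no longer collides in the cache. The memoization pattern, distilled (run_cached and _command_cache are hypothetical names for illustration, not gyp API):

```python
import subprocess

_command_cache = {}  # (command, working directory) -> captured output

def run_cached(command, cwd):
  """Memoize command output per (command, cwd), the keying the diff
  introduces; hypothetical helper for illustration only."""
  key = (command, cwd)
  if key not in _command_cache:
    _command_cache[key] = subprocess.check_output(
        command, shell=True, cwd=cwd)
  return _command_cache[key]
```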
@@ -925,10 +946,9 @@ def ExpandVariables(input, phase, variables, build_file):
else:
replacement = variables[contents]
- if isinstance(replacement, list):
+ if type(replacement) is list:
for item in replacement:
- if (not contents[-1] == '/' and
- not isinstance(item, str) and not isinstance(item, int)):
+ if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
@@ -938,8 +958,7 @@ def ExpandVariables(input, phase, variables, build_file):
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
- elif not isinstance(replacement, str) and \
- not isinstance(replacement, int):
+ elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
@@ -948,7 +967,7 @@ def ExpandVariables(input, phase, variables, build_file):
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
- if isinstance(replacement, list):
+ if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
@@ -957,7 +976,7 @@ def ExpandVariables(input, phase, variables, build_file):
else:
# Expanding in string context.
encoded_replacement = ''
- if isinstance(replacement, list):
+ if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
@@ -979,8 +998,8 @@ def ExpandVariables(input, phase, variables, build_file):
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
- if isinstance(output, list):
- if output and isinstance(output[0], list):
+ if type(output) is list:
+ if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
@@ -994,7 +1013,7 @@ def ExpandVariables(input, phase, variables, build_file):
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
- if isinstance(output, list):
+ if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
@@ -1003,6 +1022,57 @@ def ExpandVariables(input, phase, variables, build_file):
return output
+# The same condition is often evaluated over and over again so it
+# makes sense to cache as much as possible between evaluations.
+cached_conditions_asts = {}
+
+def EvalCondition(condition, conditions_key, phase, variables, build_file):
+ """Returns the dict that should be used or None if the result was
+ that nothing should be used."""
+ if type(condition) is not list:
+ raise GypError(conditions_key + ' must be a list')
+ if len(condition) != 2 and len(condition) != 3:
+ # It's possible that condition[0] won't work in which case this
+ # attempt will raise its own IndexError. That's probably fine.
+ raise GypError(conditions_key + ' ' + condition[0] +
+ ' must be length 2 or 3, not ' + str(len(condition)))
+
+ [cond_expr, true_dict] = condition[0:2]
+ false_dict = None
+ if len(condition) == 3:
+ false_dict = condition[2]
+
+  # Do expansions on the condition itself. Since the condition can naturally
+ # contain variable references without needing to resort to GYP expansion
+ # syntax, this is of dubious value for variables, but someone might want to
+ # use a command expansion directly inside a condition.
+ cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
+ build_file)
+ if type(cond_expr_expanded) not in (str, int):
+ raise ValueError, \
+ 'Variable expansion in this context permits str and int ' + \
+ 'only, found ' + cond_expr_expanded.__class__.__name__
+
+ try:
+ if cond_expr_expanded in cached_conditions_asts:
+ ast_code = cached_conditions_asts[cond_expr_expanded]
+ else:
+ ast_code = compile(cond_expr_expanded, '<string>', 'eval')
+ cached_conditions_asts[cond_expr_expanded] = ast_code
+ if eval(ast_code, {'__builtins__': None}, variables):
+ return true_dict
+ return false_dict
+ except SyntaxError, e:
+ syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
+ 'at character %d.' %
+ (str(e.args[0]), e.text, build_file, e.offset),
+ e.filename, e.lineno, e.offset, e.text)
+ raise syntax_error
+ except NameError, e:
+ gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
+ (cond_expr_expanded, build_file))
+ raise GypError(e)
+
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
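The EvalCondition helper added above caches compiled ASTs because the same condition strings (e.g. 'OS=="linux"') are evaluated once per target, which can mean thousands of compile() calls on identical input. The caching pattern in isolation:

```python
_ast_cache = {}

def eval_condition(cond_expr, variables):
  """Compile each distinct condition string once and reuse the code
  object; a distilled version of the caching in EvalCondition."""
  ast_code = _ast_cache.get(cond_expr)
  if ast_code is None:
    ast_code = compile(cond_expr, '<string>', 'eval')
    _ast_cache[cond_expr] = ast_code
  # Builtins are disabled, as above: conditions may only reference the
  # supplied variables.
  return eval(ast_code, {'__builtins__': None}, variables)

# eval_condition('OS=="linux" and chromeos==0',
#                {'OS': 'linux', 'chromeos': 0})  -> True
```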
@@ -1038,48 +1108,8 @@ def ProcessConditionsInDict(the_dict, phase, variables, build_file):
del the_dict[conditions_key]
for condition in conditions_list:
- if not isinstance(condition, list):
- raise GypError(conditions_key + ' must be a list')
- if len(condition) != 2 and len(condition) != 3:
- # It's possible that condition[0] won't work in which case this
- # attempt will raise its own IndexError. That's probably fine.
- raise GypError(conditions_key + ' ' + condition[0] +
- ' must be length 2 or 3, not ' + str(len(condition)))
-
- [cond_expr, true_dict] = condition[0:2]
- false_dict = None
- if len(condition) == 3:
- false_dict = condition[2]
-
- # Do expansions on the condition itself. Since the conditon can naturally
- # contain variable references without needing to resort to GYP expansion
- # syntax, this is of dubious value for variables, but someone might want to
- # use a command expansion directly inside a condition.
- cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
- build_file)
- if not isinstance(cond_expr_expanded, str) and \
- not isinstance(cond_expr_expanded, int):
- raise ValueError, \
- 'Variable expansion in this context permits str and int ' + \
- 'only, found ' + expanded.__class__.__name__
-
- try:
- ast_code = compile(cond_expr_expanded, '<string>', 'eval')
-
- if eval(ast_code, {'__builtins__': None}, variables):
- merge_dict = true_dict
- else:
- merge_dict = false_dict
- except SyntaxError, e:
- syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
- 'at character %d.' %
- (str(e.args[0]), e.text, build_file, e.offset),
- e.filename, e.lineno, e.offset, e.text)
- raise syntax_error
- except NameError, e:
- gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
- (cond_expr_expanded, build_file))
- raise GypError(e)
+ merge_dict = EvalCondition(condition, conditions_key, phase, variables,
+ build_file)
if merge_dict != None:
# Expand variables and nested conditionals in the merge_dict before
@@ -1094,8 +1124,7 @@ def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
- if isinstance(value, str) or isinstance(value, int) or \
- isinstance(value, list):
+ if type(value) in (str, int, list):
variables['_' + key] = value
@@ -1108,8 +1137,7 @@ def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
- if not isinstance(value, str) and not isinstance(value, int) and \
- not isinstance(value, list):
+ if type(value) not in (str, int, list):
continue
if key.endswith('%'):
@@ -1162,9 +1190,9 @@ def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
- if key != 'variables' and isinstance(value, str):
+ if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
- if not isinstance(expanded, str) and not isinstance(expanded, int):
+ if type(expanded) not in (str, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key
@@ -1221,21 +1249,21 @@ def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
- if key == 'variables' or isinstance(value, str):
+ if key == 'variables' or type(value) is str:
continue
- if isinstance(value, dict):
+ if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
- elif isinstance(value, list):
+ elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
- elif not isinstance(value, int):
+ elif type(value) is not int:
raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
' for ' + key
@@ -1246,17 +1274,17 @@ def ProcessVariablesAndConditionsInList(the_list, phase, variables,
index = 0
while index < len(the_list):
item = the_list[index]
- if isinstance(item, dict):
+ if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
- elif isinstance(item, list):
+ elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
- elif isinstance(item, str):
+ elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
- if isinstance(expanded, str) or isinstance(expanded, int):
+ if type(expanded) in (str, int):
the_list[index] = expanded
- elif isinstance(expanded, list):
+ elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
@@ -1268,7 +1296,7 @@ def ProcessVariablesAndConditionsInList(the_list, phase, variables,
'Variable expansion in this context permits strings and ' + \
'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
index
- elif not isinstance(item, int):
+ elif type(item) is not int:
raise TypeError, 'Unknown type ' + item.__class__.__name__ + \
' at index ' + index
index = index + 1
@@ -1443,6 +1471,20 @@ def RemoveSelfDependencies(targets):
target_dict[dependency_key] = Filter(dependencies, target_name)
+def RemoveLinkDependenciesFromNoneTargets(targets):
+ """Remove dependencies having the 'link_dependency' attribute from the 'none'
+ targets."""
+ for target_name, target_dict in targets.iteritems():
+ for dependency_key in dependency_sections:
+ dependencies = target_dict.get(dependency_key, [])
+ if dependencies:
+ for t in dependencies:
+ if target_dict.get('type', None) == 'none':
+ if targets[t].get('variables', {}).get('link_dependency', 0):
+ target_dict[dependency_key] = \
+ Filter(target_dict[dependency_key], t)
+
+
class DependencyGraphNode(object):
"""
@@ -1468,7 +1510,7 @@ class DependencyGraphNode(object):
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
- flat_list = []
+ flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
@@ -1482,12 +1524,15 @@ class DependencyGraphNode(object):
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
- flat_list.append(node.ref)
+ flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
+ # TODO: We want to check through the
+ # node_dependent.dependencies list but if it's long and we
+ # always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
# The dependent one or more dependencies not in flat_list. There
@@ -1503,7 +1548,7 @@ class DependencyGraphNode(object):
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
- return flat_list
+ return list(flat_list)
def FindCycles(self, path=None):
"""
@@ -1589,21 +1634,26 @@ class DependencyGraphNode(object):
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
- """Returns a list of all of a target's dependencies, recursively."""
- if dependencies == None:
- dependencies = []
+ """Returns an OrderedSet of all of a target's dependencies, recursively."""
+ if dependencies is None:
+      # Using an OrderedSet for ordered iteration with fast "is it
+      # already added" checks.
+ dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
- if dependency.ref != None and dependency.ref not in dependencies:
- dependencies.append(dependency.ref)
+ if dependency.ref is None:
+ continue
+ if dependency.ref not in dependencies:
+ dependencies.add(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
- """Returns a list of dependency targets that are linked into this target.
+ """Returns an OrderedSet of dependency targets that are linked
+ into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
@@ -1616,11 +1666,13 @@ class DependencyGraphNode(object):
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
- if dependencies == None:
- dependencies = []
+ if dependencies is None:
+      # Using an OrderedSet for ordered iteration with fast "is it
+      # already added" checks.
+ dependencies = OrderedSet()
# Check for None, corresponding to the root node.
- if self.ref == None:
+ if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
@@ -1648,8 +1700,7 @@ class DependencyGraphNode(object):
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
- if self.ref not in dependencies:
- dependencies.append(self.ref)
+ dependencies.add(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
@@ -1671,7 +1722,7 @@ class DependencyGraphNode(object):
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
- dependencies.append(self.ref)
+ dependencies.add(self.ref)
if initial or not is_linkable:
# If this is a subsequent target and it's linkable, don't look any
# further for linkable dependencies, as they'll already be linked into
@@ -1966,25 +2017,25 @@ def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
- if isinstance(item, str) or isinstance(item, int):
+ if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
- if not isinstance(item, str) or not item.startswith('-'):
+ if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
- elif isinstance(item, dict):
+ elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
- elif isinstance(item, list):
+ elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
@@ -2030,10 +2081,10 @@ def MergeDicts(to, fro, to_file, fro_file):
# modified.
if k in to:
bad_merge = False
- if isinstance(v, str) or isinstance(v, int):
- if not (isinstance(to[k], str) or isinstance(to[k], int)):
+ if type(v) in (str, int):
+ if type(to[k]) not in (str, int):
bad_merge = True
- elif v.__class__ != to[k].__class__:
+ elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
@@ -2041,19 +2092,19 @@ def MergeDicts(to, fro, to_file, fro_file):
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k
- if isinstance(v, str) or isinstance(v, int):
+ if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
- elif isinstance(v, dict):
+ elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
- elif isinstance(v, list):
+ elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
@@ -2096,7 +2147,7 @@ def MergeDicts(to, fro, to_file, fro_file):
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
- if not isinstance(to[list_base], list):
+ elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError, \
@@ -2157,43 +2208,39 @@ def SetUpConfigurations(target, target_dict):
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
- concrete = [i for i in target_dict['configurations'].iterkeys()
- if not target_dict['configurations'][i].get('abstract')]
+ concrete = [i for (i, config) in target_dict['configurations'].iteritems()
+ if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
- for configuration in target_dict['configurations'].keys():
- old_configuration_dict = target_dict['configurations'][configuration]
+ merged_configurations = {}
+ configs = target_dict['configurations']
+ for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
- new_configuration_dict = copy.deepcopy(target_dict)
-
- # Take out the bits that don't belong in a "configurations" section.
- # Since configuration setup is done before conditional, exclude, and rules
- # processing, be careful with handling of the suffix characters used in
- # those phases.
- delete_keys = []
- for key in new_configuration_dict:
+ new_configuration_dict = {}
+ for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
- if key_base in non_configuration_keys:
- delete_keys.append(key)
-
- for key in delete_keys:
- del new_configuration_dict[key]
+ if not key_base in non_configuration_keys:
+ new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
- # Put the new result back into the target dict as a configuration.
- target_dict['configurations'][configuration] = new_configuration_dict
+ merged_configurations[configuration] = new_configuration_dict
+
+  # Put the new configurations back into the target dict.
+ for configuration in merged_configurations.keys():
+ target_dict['configurations'][configuration] = (
+ merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
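The restructuring above replaces "deepcopy the whole target dict, then delete the keys that do not belong" with "copy only the keys that survive the filter", which avoids deep-copying data that was about to be thrown away. The loop's logic as a standalone helper (hypothetical name, mirroring the diff):

```python
import gyp.simple_copy

def _configuration_base(target_dict, non_configuration_keys, key_suffixes):
  """Filter-then-copy: deep-copy only the keys that belong in a
  configuration, instead of copying everything and deleting the rest."""
  result = {}
  for key, target_val in target_dict.items():
    key_ext = key[-1:]
    key_base = key[:-1] if key_ext in key_suffixes else key
    if key_base not in non_configuration_keys:
      result[key] = gyp.simple_copy.deepcopy(target_val)
  return result
```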
@@ -2264,7 +2311,7 @@ def ProcessListFiltersInDict(name, the_dict):
if operation != '!' and operation != '/':
continue
- if not isinstance(value, list):
+ if type(value) is not list:
raise ValueError, name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__
@@ -2276,7 +2323,7 @@ def ProcessListFiltersInDict(name, the_dict):
del_lists.append(key)
continue
- if not isinstance(the_dict[list_key], list):
+ if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError, name + ' key ' + list_key + \
' must be list, not ' + \
@@ -2378,17 +2425,17 @@ def ProcessListFiltersInDict(name, the_dict):
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
- if isinstance(value, dict):
+ if type(value) is dict:
ProcessListFiltersInDict(key, value)
- elif isinstance(value, list):
+ elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
- if isinstance(item, dict):
+ if type(item) is dict:
ProcessListFiltersInDict(name, item)
- elif isinstance(item, list):
+ elif type(item) is list:
ProcessListFiltersInList(name, item)
@@ -2506,7 +2553,7 @@ def ValidateRunAsInTarget(target, target_dict, build_file):
run_as = target_dict.get('run_as')
if not run_as:
return
- if not isinstance(run_as, dict):
+ if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
@@ -2515,17 +2562,17 @@ def ValidateRunAsInTarget(target, target_dict, build_file):
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
- if not isinstance(action, list):
+ if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
- if working_directory and not isinstance(working_directory, str):
+ if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
- if environment and not isinstance(environment, dict):
+ if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
@@ -2555,17 +2602,17 @@ def TurnIntIntoStrInDict(the_dict):
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
- if isinstance(v, int):
+ if type(v) is int:
v = str(v)
the_dict[k] = v
- elif isinstance(v, dict):
+ elif type(v) is dict:
TurnIntIntoStrInDict(v)
- elif isinstance(v, list):
+ elif type(v) is list:
TurnIntIntoStrInList(v)
- if isinstance(k, int):
- the_dict[str(k)] = v
+ if type(k) is int:
del the_dict[k]
+ the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
@@ -2573,11 +2620,11 @@ def TurnIntIntoStrInList(the_list):
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
- if isinstance(item, int):
+ if type(item) is int:
the_list[index] = str(item)
- elif isinstance(item, dict):
+ elif type(item) is dict:
TurnIntIntoStrInDict(item)
- elif isinstance(item, list):
+ elif type(item) is list:
TurnIntIntoStrInList(item)
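TurnIntIntoStrInDict normalizes both values and keys; the reordering above deletes the old integer key before inserting its string form. Its effect on a small nested structure:

```python
from gyp.input import TurnIntIntoStrInDict

d = {1: 2, 'list': [3, {'x': 4}]}
TurnIntIntoStrInDict(d)
assert d == {'1': '2', 'list': ['3', {'x': '4'}]}
```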
@@ -2647,8 +2694,8 @@ def SetGeneratorGlobals(generator_input_info):
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
- path_sections = base_path_sections[:]
- path_sections.extend(generator_input_info['path_sections'])
+ path_sections = set(base_path_sections)
+ path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
@@ -2707,6 +2754,10 @@ def Load(build_files, variables, includes, depth, generator_input_info, check,
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
+ # Remove all dependencies marked as 'link_dependency' from the targets of
+ # type 'none'.
+ RemoveLinkDependenciesFromNoneTargets(targets)
+
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}