-rw-r--r-- | giscanner/annotationparser.py |  47
-rw-r--r-- | giscanner/maintransformer.py  |  46
-rw-r--r-- | giscanner/transformer.py      |   3
-rw-r--r-- | misc/pep8.py                  | 140
4 files changed, 120 insertions, 116 deletions
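
The changes below do two things: they rewrap giscanner's message.warn()/message.warn_node() calls so their continuation lines satisfy the stricter indentation checks, and they update the bundled misc/pep8.py from 1.4.5 to 1.4.6. One behavioural change in pep8 1.4.6 is that '# noqa' now suppresses logical-line checks: Checker.build_tokens_line() collects comment tokens into self.noqa, which is then passed to checks such as comparison_to_singleton() and continued_indentation(). A minimal sketch of that behaviour, assuming pep8 1.4.6 semantics; the sample statements are made up and are not part of the diff:

# Illustrative sample only -- not taken from the commit below.
x = None

if x == None:  # pep8 1.4.6 reports E711 (comparison to None) on this line
    pass

if x == None:  # noqa  (comment collected into Checker.noqa, check is skipped)
    pass
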
diff --git a/giscanner/annotationparser.py b/giscanner/annotationparser.py
index a0657dc4..9d6a6b61 100644
--- a/giscanner/annotationparser.py
+++ b/giscanner/annotationparser.py
@@ -483,57 +483,48 @@ class DocTag(object):
                     int(v)
                 except (TypeError, ValueError):
                     if v is None:
-                        message.warn(
-                            'array option %s needs a value' % (
-                            name, ),
-                            positions=self.position)
+                        message.warn('array option %s needs a value' % (name, ),
+                                     positions=self.position)
                     else:
-                        message.warn(
-                            'invalid array %s option value %r, '
-                            'must be an integer' % (name, v, ),
-                            positions=self.position)
+                        message.warn('invalid array %s option value %r, '
+                                     'must be an integer' % (name, v, ),
+                                     positions=self.position)
             elif name == OPT_ARRAY_LENGTH:
                 if v is None:
-                    message.warn(
-                        'array option length needs a value',
-                        positions=self.position)
+                    message.warn('array option length needs a value',
+                                 positions=self.position)
             else:
-                message.warn(
-                    'invalid array annotation value: %r' % (
-                    name, ), self.position)
+                message.warn('invalid array annotation value: %r' % (name, ),
+                             self.position)
 
     def _validate_closure(self, option, value):
         if value is not None and value.length() > 1:
-            message.warn(
-                'closure takes at most 1 value, %d given' % (
-                value.length(), ), self.position)
+            message.warn('closure takes at most 1 value, %d given' % (value.length(), ),
+                         self.position)
 
     def _validate_element_type(self, option, value):
         self._validate_option(option, value, required=True)
         if value is None:
-            message.warn(
-                'element-type takes at least one value, none given',
-                self.position)
+            message.warn('element-type takes at least one value, none given',
+                         self.position)
             return
         if value.length() > 2:
-            message.warn(
-                'element-type takes at most 2 values, %d given' % (
-                value.length(), ), self.position)
+            message.warn('element-type takes at most 2 values, %d given' % (value.length(), ),
+                         self.position)
             return
 
     def _validate_out(self, option, value):
         if value is None:
             return
         if value.length() > 1:
-            message.warn(
-                'out annotation takes at most 1 value, %d given' % (
-                value.length(), ), self.position)
+            message.warn('out annotation takes at most 1 value, %d given' % (value.length(), ),
+                         self.position)
             return
         value_str = value.one()
         if value_str not in [OPT_OUT_CALLEE_ALLOCATES, OPT_OUT_CALLER_ALLOCATES]:
-            message.warn("out annotation value is invalid: %r" % (
-                value_str, ), self.position)
+            message.warn("out annotation value is invalid: %r" % (value_str, ),
+                         self.position)
             return
 
     def _get_gtk_doc_value(self):
diff --git a/giscanner/maintransformer.py b/giscanner/maintransformer.py
index 11bfb4cb..a4f8f730 100644
--- a/giscanner/maintransformer.py
+++ b/giscanner/maintransformer.py
@@ -143,20 +143,19 @@ class MainTransformer(object):
         target = self._namespace.get_by_symbol(rename_to)
         if not target:
             message.warn_node(node,
-                "Can't find symbol %r referenced by Rename annotation" % (
-                    rename_to, ))
+                              "Can't find symbol %r referenced by Rename annotation" % (rename_to, ))
         elif target.shadowed_by:
             message.warn_node(node,
-                "Function %r already shadowed by %r, can't overwrite with %r" % (
-                    target.symbol,
-                    target.shadowed_by,
-                    rename_to))
+                              "Function %r already shadowed by %r, can't overwrite "
+                              "with %r" % (target.symbol,
+                                           target.shadowed_by,
+                                           rename_to))
         elif target.shadows:
             message.warn_node(node,
-                "Function %r already shadows %r, can't multiply shadow with %r" % (
-                    target.symbol,
-                    target.shadows,
-                    rename_to))
+                              "Function %r already shadows %r, can't multiply shadow "
+                              "with %r" % (target.symbol,
+                                           target.shadows,
+                                           rename_to))
         else:
             target.shadowed_by = node.name
             node.shadows = target.name
@@ -723,10 +722,9 @@ class MainTransformer(object):
                 text = ', should be one of %s' % (', '.join(repr(p) for p in unused), )
 
             tag = block.params.get(doc_name)
-            message.warn(
-                '%s: unknown parameter %r in documentation comment%s' % (
-                block.name, doc_name, text),
-                tag.position)
+            message.warn('%s: unknown parameter %r in documentation '
+                         'comment%s' % (block.name, doc_name, text),
+                         tag.position)
 
     def _apply_annotations_callable(self, node, chain, block):
         self._apply_annotations_annotated(node, block)
@@ -1201,20 +1199,20 @@ method or constructor of some type."""
                 parent = None
             if parent is None:
                 message.warn_node(func,
-                    "Return value is not superclass for constructor; "
-                    "symbol=%r constructed=%r return=%r" % (
-                    func.symbol,
-                    str(origin_node.create_type()),
-                    str(func.retval.type)))
+                                  "Return value is not superclass for constructor; "
+                                  "symbol=%r constructed=%r return=%r" %
+                                  (func.symbol,
+                                   str(origin_node.create_type()),
+                                   str(func.retval.type)))
                 return False
         else:
             if origin_node != target:
                 message.warn_node(func,
-                    "Constructor return type mismatch symbol=%r "
-                    "constructed=%r return=%r" % (
-                    func.symbol,
-                    str(origin_node.create_type()),
-                    str(func.retval.type)))
+                                  "Constructor return type mismatch symbol=%r "
+                                  "constructed=%r return=%r" %
+                                  (func.symbol,
+                                   str(origin_node.create_type()),
+                                   str(func.retval.type)))
                 return False
         return True
diff --git a/giscanner/transformer.py b/giscanner/transformer.py
index f70d756a..75713474 100644
--- a/giscanner/transformer.py
+++ b/giscanner/transformer.py
@@ -294,8 +294,7 @@ raise ValueError."""
                 return name
         (ns, name) = matches[-1]
         raise TransformerException(
-            "Skipping foreign identifier %r from namespace %s" % (
-            ident, ns.name, ))
+            "Skipping foreign identifier %r from namespace %s" % (ident, ns.name, ))
         return None
 
     def _strip_symbol(self, symbol):
diff --git a/misc/pep8.py b/misc/pep8.py
index f99ae3a3..8413270f 100644
--- a/misc/pep8.py
+++ b/misc/pep8.py
@@ -45,7 +45,7 @@ W warnings
 700 statements
 900 syntax error
 """
-__version__ = '1.4.5'
+__version__ = '1.4.6'
 
 import os
 import sys
@@ -63,13 +63,13 @@ except ImportError:
     from ConfigParser import RawConfigParser
 
 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
-DEFAULT_IGNORE = 'E226,E24'
+DEFAULT_IGNORE = 'E123,E226,E24'
 if sys.platform == 'win32':
     DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
 else:
     DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                   os.path.expanduser('~/.config'), 'pep8')
-PROJECT_CONFIG = ('.pep8', 'tox.ini', 'setup.cfg')
+PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
 MAX_LINE_LENGTH = 79
 REPORT_FORMAT = {
@@ -215,9 +215,7 @@ def maximum_line_length(physical_line, max_line_length):
     """
     line = physical_line.rstrip()
     length = len(line)
-    if length > max_line_length:
-        if noqa(line):
-            return
+    if length > max_line_length and not noqa(line):
         if hasattr(line, 'decode'):   # Python 2
             # The line could contain multi-byte characters
             try:
@@ -383,7 +381,8 @@ def indentation(logical_line, previous_logical, indent_char,
         yield 0, "E113 unexpected indentation"
 
 
-def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
+def continued_indentation(logical_line, tokens, indent_level, hang_closing,
+                          noqa, verbose):
     r"""
     Continuation lines should align wrapped elements either vertically using
     Python's implicit line joining inside parentheses, brackets and braces, or
@@ -411,7 +410,7 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
     """
     first_row = tokens[0][2][0]
     nrows = 1 + tokens[-1][2][0] - first_row
-    if nrows == 1 or noqa(tokens[0][4]):
+    if noqa or nrows == 1:
         return
 
     # indent_next tells us whether the next block is indented; assuming
@@ -459,17 +458,19 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
                 # an unbracketed continuation line (ie, backslash)
                 open_row = 0
             hang = rel_indent[row] - rel_indent[open_row]
-            visual_indent = indent_chances.get(start[1])
-
-            if token_type == tokenize.OP and text in ']})':
-                # this line starts with a closing bracket
-                if indent[depth]:
-                    if start[1] != indent[depth]:
-                        yield (start, "E124 closing bracket does not match "
-                               "visual indentation")
-                elif hang:
-                    yield (start, "E123 closing bracket does not match "
-                           "indentation of opening bracket's line")
+            close_bracket = (token_type == tokenize.OP and text in ']})')
+            visual_indent = (not close_bracket and hang > 0 and
+                             indent_chances.get(start[1]))
+
+            if close_bracket and indent[depth]:
+                # closing bracket for visual indent
+                if start[1] != indent[depth]:
+                    yield (start, "E124 closing bracket does not match "
+                           "visual indentation")
+            elif close_bracket and not hang:
+                # closing bracket matches indentation of opening bracket's line
+                if hang_closing:
+                    yield start, "E133 closing bracket is missing indentation"
             elif visual_indent is True:
                 # visual indent is verified
                 if not indent[depth]:
@@ -483,7 +484,9 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
                            "under-indented for visual indent")
             elif hang == 4 or (indent_next and rel_indent[row] == 8):
                 # hanging indent is verified
-                pass
+                if close_bracket and not hang_closing:
+                    yield (start, "E123 closing bracket does not match "
+                           "indentation of opening bracket's line")
             else:
                 # indent is broken
                 if hang <= 0:
@@ -535,6 +538,7 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
                 for idx in range(row, -1, -1):
                     if parens[idx]:
                         parens[idx] -= 1
+                        rel_indent[row] = rel_indent[idx]
                         break
             assert len(indent) == depth + 1
             if start[1] not in indent_chances:
@@ -543,7 +547,7 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
 
         last_token_multiline = (start[0] != end[0])
 
-    if indent_next and rel_indent[-1] == 4:
+    if indent_next and expand_indent(line) == indent_level + 4:
         yield (last_indent, "E125 continuation line does not distinguish "
                "itself from next logical line")
 
@@ -565,11 +569,9 @@ def whitespace_before_parameters(logical_line, tokens):
     E211: dict ['key'] = list[index]
     E211: dict['key'] = list [index]
     """
-    prev_type = tokens[0][0]
-    prev_text = tokens[0][1]
-    prev_end = tokens[0][3]
+    prev_type, prev_text, __, prev_end, __ = tokens[0]
     for index in range(1, len(tokens)):
-        token_type, text, start, end, line = tokens[index]
+        token_type, text, start, end, __ = tokens[index]
         if (token_type == tokenize.OP and
             text in '([' and
             start != prev_end and
@@ -844,19 +846,21 @@ def compound_statements(logical_line):
     line = logical_line
     last_char = len(line) - 1
     found = line.find(':')
-    if -1 < found < last_char:
+    while -1 < found < last_char:
         before = line[:found]
         if (before.count('{') <= before.count('}') and  # {'a': 1} (dict)
             before.count('[') <= before.count(']') and  # [1:2] (slice)
             before.count('(') <= before.count(')') and  # (Python 3 annotation)
                 not LAMBDA_REGEX.search(before)):       # lambda x: x
             yield found, "E701 multiple statements on one line (colon)"
+        found = line.find(':', found + 1)
     found = line.find(';')
-    if -1 < found:
+    while -1 < found:
         if found < last_char:
             yield found, "E702 multiple statements on one line (semicolon)"
         else:
             yield found, "E703 statement ends with a semicolon"
+        found = line.find(';', found + 1)
 
 
 def explicit_line_join(logical_line, tokens):
@@ -894,7 +898,7 @@ def explicit_line_join(logical_line, tokens):
             parens -= 1
 
 
-def comparison_to_singleton(logical_line):
+def comparison_to_singleton(logical_line, noqa):
     """
     Comparisons to singletons like None should always be done with
     "is" or "is not", never the equality operators.
@@ -908,7 +912,7 @@
     set to some other value.  The other value might have a type (such as a
     container) that could be false in a boolean context!
     """
-    match = COMPARE_SINGLETON_REGEX.search(logical_line)
+    match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line)
     if match:
         same = (match.group(1) == '==')
         singleton = match.group(2)
@@ -1018,7 +1022,6 @@ if '' == ''.encode():
             return f.readlines()
         finally:
             f.close()
 
-    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
     stdin_get_value = sys.stdin.read
 else:
@@ -1036,7 +1039,6 @@ else:
             return f.readlines()
         finally:
             f.close()
 
-    isidentifier = str.isidentifier
 
     def stdin_get_value():
@@ -1185,6 +1187,7 @@ class Checker(object):
         self._logical_checks = options.logical_checks
         self._ast_checks = options.ast_checks
         self.max_line_length = options.max_line_length
+        self.hang_closing = options.hang_closing
         self.verbose = options.verbose
         self.filename = filename
         if filename is None:
@@ -1202,14 +1205,24 @@ class Checker(object):
                 self.lines = []
         else:
             self.lines = lines
+        if self.lines:
+            ord0 = ord(self.lines[0][0])
+            if ord0 in (0xef, 0xfeff):  # Strip the UTF-8 BOM
+                if ord0 == 0xfeff:
+                    self.lines[0] = self.lines[0][1:]
+                elif self.lines[0][:3] == '\xef\xbb\xbf':
+                    self.lines[0] = self.lines[0][3:]
         self.report = report or options.report
         self.report_error = self.report.error
 
     def report_invalid_syntax(self):
         exc_type, exc = sys.exc_info()[:2]
-        offset = exc.args[1]
-        if len(offset) > 2:
-            offset = offset[1:3]
+        if len(exc.args) > 1:
+            offset = exc.args[1]
+            if len(offset) > 2:
+                offset = offset[1:3]
+        else:
+            offset = (1, 0)
         self.report_error(offset[0], offset[1] or 0,
                           'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                           self.report_invalid_syntax)
@@ -1262,10 +1275,14 @@ class Checker(object):
         """
         self.mapping = []
         logical = []
+        comments = []
         length = 0
         previous = None
         for token in self.tokens:
             token_type, text = token[0:2]
+            if token_type == tokenize.COMMENT:
+                comments.append(text)
+                continue
             if token_type in SKIP_TOKENS:
                 continue
             if token_type == tokenize.STRING:
@@ -1288,6 +1305,7 @@ class Checker(object):
             length += len(text)
             previous = token
         self.logical_line = ''.join(logical)
+        self.noqa = comments and noqa(''.join(comments))
         # With Python 2, if the line ends with '\r\r\n' the assertion fails
         # assert self.logical_line.strip() == self.logical_line
 
@@ -1321,7 +1339,7 @@ class Checker(object):
     def check_ast(self):
         try:
             tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
-        except SyntaxError:
+        except (SyntaxError, TypeError):
             return self.report_invalid_syntax()
         for name, cls, _ in self._ast_checks:
             checker = cls(tree, self.filename)
@@ -1578,7 +1596,7 @@ class StyleGuide(object):
             options.ignore = tuple(DEFAULT_IGNORE.split(','))
         else:
             # Ignore all checks which are not explicitly selected
-            options.ignore = tuple(options.ignore or options.select and ('',))
+            options.ignore = ('',) if options.select else tuple(options.ignore)
         options.benchmark_keys = BENCHMARK_KEYS[:]
         options.ignore_code = self.ignore_code
         options.physical_checks = self.get_checks('physical_line')
@@ -1631,23 +1649,26 @@ class StyleGuide(object):
                 print('directory ' + root)
             counters['directories'] += 1
             for subdir in sorted(dirs):
-                if self.excluded(os.path.join(root, subdir)):
+                if self.excluded(subdir, root):
                     dirs.remove(subdir)
             for filename in sorted(files):
                 # contain a pattern that matches?
                 if ((filename_match(filename, filepatterns) and
-                     not self.excluded(filename))):
+                     not self.excluded(filename, root))):
                     runner(os.path.join(root, filename))
 
-    def excluded(self, filename):
+    def excluded(self, filename, parent=None):
         """
         Check if options.exclude contains a pattern that matches filename.
         """
+        if not self.options.exclude:
+            return False
         basename = os.path.basename(filename)
-        return any((filename_match(filename, self.options.exclude,
-                                    default=False),
-                    filename_match(basename, self.options.exclude,
-                                    default=False)))
+        if filename_match(basename, self.options.exclude):
+            return True
+        if parent:
+            filename = os.path.join(parent, filename)
+        return filename_match(filename, self.options.exclude)
 
     def ignore_code(self, code):
         """
@@ -1677,8 +1698,9 @@ def get_parser(prog='pep8', version=__version__):
     parser = OptionParser(prog=prog, version=version,
                           usage="%prog [options] input ...")
     parser.config_options = [
-        'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'count',
-        'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose']
+        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
+        'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
+        'show-source', 'statistics', 'verbose']
     parser.add_option('-v', '--verbose', default=0, action='count',
                       help="print status messages, or debug with -vv")
     parser.add_option('-q', '--quiet', default=0, action='count',
@@ -1713,6 +1735,9 @@ def get_parser(prog='pep8', version=__version__):
                       default=MAX_LINE_LENGTH,
                       help="set maximum allowed line length "
                            "(default: %default)")
+    parser.add_option('--hang-closing', action='store_true',
+                      help="hang closing bracket instead of matching "
+                           "indentation of opening bracket's line")
     parser.add_option('--format', metavar='format', default='default',
                       help="set the error format [default|pylint|<custom>]")
     parser.add_option('--diff', action='store_true',
@@ -1741,17 +1766,11 @@ def read_config(options, args, arglist, parser):
 
     parent = tail = args and os.path.abspath(os.path.commonprefix(args))
     while tail:
-        for name in PROJECT_CONFIG:
-            local_conf = os.path.join(parent, name)
-            if os.path.isfile(local_conf):
-                break
-        else:
-            parent, tail = os.path.split(parent)
-            continue
-        if options.verbose:
-            print('local configuration: %s' % local_conf)
-        config.read(local_conf)
-        break
+        if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
+            if options.verbose:
+                print('local configuration: in %s' % parent)
+            break
+        parent, tail = os.path.split(parent)
 
     pep8_section = parser.prog
     if config.has_section(pep8_section):
@@ -1819,13 +1838,10 @@ def process_options(arglist=None, parse_argv=False, config_file=None,
         options = read_config(options, args, arglist, parser)
     options.reporter = parse_argv and options.quiet == 1 and FileReport
 
-    if options.filename:
-        options.filename = options.filename.split(',')
+    options.filename = options.filename and options.filename.split(',')
     options.exclude = options.exclude.split(',')
-    if options.select:
-        options.select = options.select.split(',')
-    if options.ignore:
-        options.ignore = options.ignore.split(',')
+    options.select = options.select and options.select.split(',')
+    options.ignore = options.ignore and options.ignore.split(',')
 
     if options.diff:
         options.reporter = DiffReport
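
For reference, a sketch of the wrapping styles that the renamed continued_indentation() check distinguishes, and of the new --hang-closing option; the report() helper and the strings are hypothetical, only the error codes and the E123 entry in DEFAULT_IGNORE come from the diff above.

# Illustrative sketch, not part of the commit; report() is a made-up helper.
def report(*parts):
    return ' '.join(parts)

# Visual indent: the continuation line aligns with the opening bracket.
# This is the style the giscanner warn()/warn_node() calls were rewrapped to;
# misaligned continuations are reported as E127/E128.
message = report('unknown parameter %r in documentation comment' % ('arg', ),
                 'at some position')

# Hanging indent with the closing bracket dedented to the opening line's
# indentation: accepted by default, reported as E133 under --hang-closing.
# Indenting the bracket to match the arguments instead would be E123, which
# 1.4.6 adds to DEFAULT_IGNORE ('E123,E226,E24').
values = report(
    'first argument',
    'second argument',
)

Running pep8 with --hang-closing flips the preference for the second form: the dedented closing bracket becomes E133, while a bracket indented to match the arguments is accepted.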