| author | Timothy Crosley <timothy.crosley@gmail.com> | 2020-10-06 21:21:17 -0700 |
|---|---|---|
| committer | Timothy Crosley <timothy.crosley@gmail.com> | 2020-10-06 21:21:17 -0700 |
| commit | dd2d5f1b43fadd5487293d2a2734b5dca08a32ff | |
| tree | 768d4f2a4636fe43f1bff6b7965ead02102a0bef /isort | |
| parent | e3dc4bdcd8d33bc2f2e1c390cd0c34a5071434b3 | |
| parent | d2954516ef4de49fd3ded305151a5bfb84827b1d | |
Merge branch 'develop' of https://github.com/timothycrosley/isort into issue/1443/initial-progress
Diffstat (limited to 'isort')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | isort/_version.py | 2 |
| -rw-r--r-- | isort/api.py | 7 |
| -rw-r--r-- | isort/core.py | 51 |
| -rw-r--r-- | isort/deprecated/finders.py | 14 |
| -rw-r--r-- | isort/exceptions.py | 37 |
| -rw-r--r-- | isort/io.py | 15 |
| -rw-r--r-- | isort/literal.py | 21 |
| -rw-r--r-- | isort/main.py | 136 |
| -rw-r--r-- | isort/output.py | 86 |
| -rw-r--r-- | isort/parse.py | 39 |
| -rw-r--r-- | isort/place.py | 2 |
| -rw-r--r-- | isort/profiles.py | 8 |
| -rw-r--r-- | isort/pylama_isort.py | 22 |
| -rw-r--r-- | isort/settings.py | 50 |
| -rw-r--r-- | isort/setuptools_commands.py | 4 |
| -rw-r--r-- | isort/sorting.py | 5 |
| -rw-r--r-- | isort/utils.py | 13 |
| -rw-r--r-- | isort/wrap.py | 14 |
18 files changed, 411 insertions, 115 deletions
diff --git a/isort/_version.py b/isort/_version.py
index cfda0f8e..e82c92eb 100644
--- a/isort/_version.py
+++ b/isort/_version.py
@@ -1 +1 @@
-__version__ = "5.4.2"
+__version__ = "5.5.4"
diff --git a/isort/api.py b/isort/api.py
index cbcc3e6e..a8cecb62 100644
--- a/isort/api.py
+++ b/isort/api.py
@@ -210,7 +210,7 @@ def check_stream(
     )
     printer = create_terminal_printer(color=config.color_output)
     if not changed:
-        if config.verbose:
+        if config.verbose and not config.only_modified:
             printer.success(f"{file_path or ''} Everything Looks Good!")
         return True
     else:
@@ -299,6 +299,7 @@ def sort_file(
     """
     with io.File.read(filename) as source_file:
         actual_file_path = file_path or source_file.path
+        config = _config(path=actual_file_path, config=config, **config_kwargs)
        changed: bool = False
        try:
            if write_to_stdout:
@@ -309,7 +310,6 @@ def sort_file(
                     file_path=actual_file_path,
                     disregard_skip=disregard_skip,
                     extension=extension,
-                    **config_kwargs,
                 )
             else:
                 tmp_file = source_file.path.with_suffix(source_file.path.suffix + ".isorted")
@@ -325,7 +325,6 @@ def sort_file(
                         file_path=actual_file_path,
                         disregard_skip=disregard_skip,
                         extension=extension,
-                        **config_kwargs,
                     )
                 if changed:
                     if show_diff or ask_to_apply:
@@ -355,7 +354,7 @@ def sort_file(
                     try:  # Python 3.8+: use `missing_ok=True` instead of try except.
                         tmp_file.unlink()
                     except FileNotFoundError:
-                        pass
+                        pass  # pragma: no cover
         except ExistingSyntaxErrors:
             warn(f"{actual_file_path} unable to sort due to existing syntax errors")
         except IntroducedSyntaxErrors:  # pragma: no cover
diff --git a/isort/core.py b/isort/core.py
index 46900e02..7f4c2c8a 100644
--- a/isort/core.py
+++ b/isort/core.py
@@ -58,7 +58,6 @@ def process(
     contains_imports: bool = False
     in_top_comment: bool = False
     first_import_section: bool = True
-    section_comments = [f"# {heading}" for heading in config.import_headings.values()]
     indent: str = ""
     isort_off: bool = False
     code_sorting: Union[bool, str] = False
@@ -68,6 +67,7 @@ def process(
     made_changes: bool = False
     stripped_line: str = ""
     end_of_file: bool = False
+    verbose_output: List[str] = []

     if config.float_to_top:
         new_input = ""
@@ -84,9 +84,13 @@ def process(
                 if line == "# isort: off\n":
                     isort_off = True
                 if current:
+                    if add_imports:
+                        current += line_separator + line_separator.join(add_imports)
+                        add_imports = []
                     parsed = parse.file_contents(current, config=config)
+                    verbose_output += parsed.verbose_output
                     extra_space = ""
-                    while current[-1] == "\n":
+                    while current and current[-1] == "\n":
                         extra_space += "\n"
                         current = current[:-1]
                     extra_space = extra_space.replace("\n", "", 1)
@@ -146,11 +150,11 @@ def process(
             if (
                 (index == 0 or (index in (1, 2) and not contains_imports))
                 and stripped_line.startswith("#")
-                and stripped_line not in section_comments
+                and stripped_line not in config.section_comments
             ):
                 in_top_comment = True
             elif in_top_comment:
-                if not line.startswith("#") or stripped_line in section_comments:
+                if not line.startswith("#") or stripped_line in config.section_comments:
                     in_top_comment = False
                     first_comment_index_end = index - 1
@@ -210,8 +214,13 @@ def process(
                     else:
                         code_sorting_section += line
                         line = ""
-            elif stripped_line in config.section_comments and not import_section:
-                import_section += line
+            elif stripped_line in config.section_comments:
+                if import_section and not contains_imports:
+                    output_stream.write(import_section)
+                    import_section = line
+                    not_imports = False
+                else:
+                    import_section += line
                 indent = line[: -len(line.lstrip())]
             elif not (stripped_line or contains_imports):
                 not_imports = True
@@ -302,6 +311,7 @@ def process(
                     raw_import_section += line
                 if not contains_imports:
                     output_stream.write(import_section)
+
                 else:
                     leading_whitespace = import_section[: -len(import_section.lstrip())]
                     trailing_whitespace = import_section[len(import_section.rstrip()) :]
@@ -317,8 +327,11 @@ def process(
                         line[len(indent) :]
                         for line in import_section.splitlines(keepends=True)
                     )
+                    parsed_content = parse.file_contents(import_section, config=config)
+                    verbose_output += parsed_content.verbose_output
+
                     sorted_import_section = output.sorted_imports(
-                        parse.file_contents(import_section, config=config),
+                        parsed_content,
                         _indented_config(config, indent),
                         extension,
                         import_type="cimport" if cimports else "import",
@@ -337,7 +350,6 @@ def process(
                         line_separator=line_separator,
                         ignore_whitespace=config.ignore_whitespace,
                     )
-
                    output_stream.write(sorted_import_section)
                    if not line and not indent and next_import_section:
                        output_stream.write(line_separator)
@@ -358,6 +370,29 @@ def process(
                 output_stream.write(line)
                 not_imports = False

+            if stripped_line and not in_quote and not import_section and not next_import_section:
+                if stripped_line == "yield":
+                    while not stripped_line or stripped_line == "yield":
+                        new_line = input_stream.readline()
+                        if not new_line:
+                            break
+
+                        output_stream.write(new_line)
+                        stripped_line = new_line.strip().split("#")[0]
+
+                if stripped_line.startswith("raise") or stripped_line.startswith("yield"):
+                    while stripped_line.endswith("\\"):
+                        new_line = input_stream.readline()
+                        if not new_line:
+                            break
+
+                        output_stream.write(new_line)
+                        stripped_line = new_line.strip().split("#")[0]
+
+    if made_changes and config.only_modified:
+        for output_str in verbose_output:
+            print(output_str)
+
     return made_changes
diff --git a/isort/deprecated/finders.py b/isort/deprecated/finders.py
index 77eb23fa..dbb6fec0 100644
--- a/isort/deprecated/finders.py
+++ b/isort/deprecated/finders.py
@@ -7,6 +7,7 @@ import re
 import sys
 import sysconfig
 from abc import ABCMeta, abstractmethod
+from contextlib import contextmanager
 from fnmatch import fnmatch
 from functools import lru_cache
 from glob import glob
@@ -15,7 +16,7 @@ from typing import Dict, Iterable, Iterator, List, Optional, Pattern, Sequence,

 from isort import sections
 from isort.settings import KNOWN_SECTION_MAPPING, Config
-from isort.utils import chdir, exists_case_sensitive
+from isort.utils import exists_case_sensitive

 try:
     from pipreqs import pipreqs
@@ -36,6 +37,17 @@ except ImportError:
     Pipfile = None


+@contextmanager
+def chdir(path: str) -> Iterator[None]:
+    """Context manager for changing dir and restoring previous workdir after exit."""
+    curdir = os.getcwd()
+    os.chdir(path)
+    try:
+        yield
+    finally:
+        os.chdir(curdir)
+
+
 class BaseFinder(metaclass=ABCMeta):
     def __init__(self, config: Config) -> None:
         self.config = config
diff --git a/isort/exceptions.py b/isort/exceptions.py
index 9f45744c..b98454a2 100644
--- a/isort/exceptions.py
+++ b/isort/exceptions.py
@@ -1,4 +1,7 @@
 """All isort specific exception classes should be defined here"""
+from pathlib import Path
+from typing import Any, Dict, Union
+
 from .profiles import profiles


@@ -132,3 +135,37 @@ class AssignmentsFormatMismatch(ISortError):
             "...\n\n"
         )
         self.code = code
+
+
+class UnsupportedSettings(ISortError):
+    """Raised when settings are passed into isort (either from config, CLI, or runtime)
+    that it doesn't support.
+    """
+
+    @staticmethod
+    def _format_option(name: str, value: Any, source: str) -> str:
+        return f"\t- {name} = {value} (source: '{source}')"
+
+    def __init__(self, unsupported_settings: Dict[str, Dict[str, str]]):
+        errors = "\n".join(
+            self._format_option(name, **option) for name, option in unsupported_settings.items()
+        )
+
+        super().__init__(
+            "isort was provided settings that it doesn't support:\n\n"
+            f"{errors}\n\n"
+            "For a complete and up-to-date listing of supported settings see: "
+            "https://pycqa.github.io/isort/docs/configuration/options/.\n"
+        )
+        self.unsupported_settings = unsupported_settings
+
+
+class UnsupportedEncoding(ISortError):
+    """Raised when isort encounters an encoding error while trying to read a file"""
+
+    def __init__(
+        self,
+        filename: Union[str, Path],
+    ):
+        super().__init__(f"Unknown or unsupported encoding in {filename}")
+        self.filename = filename
diff --git a/isort/io.py b/isort/io.py
index a0357347..7ff2807d 100644
--- a/isort/io.py
+++ b/isort/io.py
@@ -4,7 +4,9 @@ import tokenize
 from contextlib import contextmanager
 from io import BytesIO, StringIO, TextIOWrapper
 from pathlib import Path
-from typing import Iterator, NamedTuple, TextIO, Union
+from typing import Callable, Iterator, NamedTuple, TextIO, Union
+
+from isort.exceptions import UnsupportedEncoding

 _ENCODING_PATTERN = re.compile(br"^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")

@@ -15,8 +17,15 @@ class File(NamedTuple):
     encoding: str

     @staticmethod
+    def detect_encoding(filename: str, readline: Callable[[], bytes]):
+        try:
+            return tokenize.detect_encoding(readline)[0]
+        except Exception:
+            raise UnsupportedEncoding(filename)
+
+    @staticmethod
     def from_contents(contents: str, filename: str) -> "File":
-        encoding, _ = tokenize.detect_encoding(BytesIO(contents.encode("utf-8")).readline)
+        encoding = File.detect_encoding(filename, BytesIO(contents.encode("utf-8")).readline)
         return File(StringIO(contents), path=Path(filename).resolve(), encoding=encoding)

     @property
@@ -30,7 +39,7 @@ class File(NamedTuple):
         """
         buffer = open(filename, "rb")
         try:
-            encoding, _ = tokenize.detect_encoding(buffer.readline)
+            encoding = File.detect_encoding(filename, buffer.readline)
             buffer.seek(0)
             text = TextIOWrapper(buffer, encoding, line_buffering=True, newline="")
             text.mode = "r"  # type: ignore
diff --git a/isort/literal.py b/isort/literal.py
index 28e0855c..01bd05e7 100644
--- a/isort/literal.py
+++ b/isort/literal.py
@@ -21,17 +21,18 @@ type_mapping: Dict[str, Tuple[type, Callable[[Any, ISortPrettyPrinter], str]]] =


 def assignments(code: str) -> str:
-    sort_assignments = {}
+    values = {}
     for line in code.splitlines(keepends=True):
-        if line:
-            if " = " not in line:
-                raise AssignmentsFormatMismatch(code)
-            else:
-                variable_name, value = line.split(" = ", 1)
-                sort_assignments[variable_name] = value
-
-    sorted_assignments = dict(sorted(sort_assignments.items(), key=lambda item: item[1]))
-    return "".join(f"{key} = {value}" for key, value in sorted_assignments.items())
+        if not line.strip():
+            continue
+        if " = " not in line:
+            raise AssignmentsFormatMismatch(code)
+        variable_name, value = line.split(" = ", 1)
+        values[variable_name] = value
+
+    return "".join(
+        f"{variable_name} = {values[variable_name]}" for variable_name in sorted(values.keys())
+    )


 def assignment(code: str, sort_type: str, extension: str, config: Config = DEFAULT_CONFIG) -> str:
diff --git a/isort/main.py b/isort/main.py
index 990e1218..a8e59f0e 100644
--- a/isort/main.py
+++ b/isort/main.py
@@ -10,7 +10,8 @@ from typing import Any, Dict, Iterable, Iterator, List, Optional, Sequence, Set
 from warnings import warn

 from . import __version__, api, sections
-from .exceptions import FileSkipped
+from .exceptions import FileSkipped, UnsupportedEncoding
+from .format import create_terminal_printer
 from .logo import ASCII_ART
 from .profiles import profiles
 from .settings import VALID_PY_TARGETS, Config, WrapModes
@@ -66,9 +67,10 @@ Visit https://pycqa.github.io/isort/ for complete information about how to use i


 class SortAttempt:
-    def __init__(self, incorrectly_sorted: bool, skipped: bool) -> None:
+    def __init__(self, incorrectly_sorted: bool, skipped: bool, supported_encoding: bool) -> None:
         self.incorrectly_sorted = incorrectly_sorted
         self.skipped = skipped
+        self.supported_encoding = supported_encoding


 def sort_imports(
@@ -87,7 +89,7 @@ def sort_imports(
             incorrectly_sorted = not api.check_file(file_name, config=config, **kwargs)
         except FileSkipped:
             skipped = True
-        return SortAttempt(incorrectly_sorted, skipped)
+        return SortAttempt(incorrectly_sorted, skipped, True)
     else:
         try:
             incorrectly_sorted = not api.sort_file(
@@ -99,13 +101,27 @@ def sort_imports(
             )
         except FileSkipped:
             skipped = True
-        return SortAttempt(incorrectly_sorted, skipped)
+        return SortAttempt(incorrectly_sorted, skipped, True)
     except (OSError, ValueError) as error:
         warn(f"Unable to parse file {file_name} due to {error}")
         return None
+    except UnsupportedEncoding:
+        if config.verbose:
+            warn(f"Encoding not supported for {file_name}")
+        return SortAttempt(incorrectly_sorted, skipped, False)
+    except Exception:
+        printer = create_terminal_printer(color=config.color_output)
+        printer.error(
+            f"Unrecoverable exception thrown when parsing {file_name}! "
+            "This should NEVER happen.\n"
+            "If encountered, please open an issue: https://github.com/PyCQA/isort/issues/new"
+        )
+        raise


-def iter_source_code(paths: Iterable[str], config: Config, skipped: List[str]) -> Iterator[str]:
+def iter_source_code(
+    paths: Iterable[str], config: Config, skipped: List[str], broken: List[str]
+) -> Iterator[str]:
     """Iterate over all Python source files defined in paths."""
     visited_dirs: Set[Path] = set()
@@ -133,6 +149,8 @@ def iter_source_code(paths: Iterable[str], config: Config, skipped: List[str]) -
                         skipped.append(filename)
                     else:
                         yield filepath
+        elif not os.path.exists(path):
+            broken.append(path)
         else:
             yield path
@@ -258,7 +276,12 @@ def _build_arg_parser() -> argparse.ArgumentParser:
         "--future",
         dest="known_future_library",
         action="append",
-        help="Force isort to recognize a module as part of the future compatibility libraries.",
+        help="Force isort to recognize a module as part of Python's internal future compatibility "
+        "libraries. WARNING: this overrides the behavior of __future__ handling and therefore"
+        " can result in code that can't execute. If you're looking to add dependencies such "
+        "as six a better option is to create a another section below --future using custom "
+        "sections. See: https://github.com/PyCQA/isort#custom-sections-and-ordering and the "
+        "discussion here: https://github.com/PyCQA/isort/issues/1463.",
     )
     parser.add_argument(
         "--fas",
@@ -288,8 +311,9 @@ def _build_arg_parser() -> argparse.ArgumentParser:
         const=2,
         type=int,
         dest="force_grid_wrap",
-        help="Force number of from imports (defaults to 2) to be grid wrapped regardless of line "
-        "length",
+        help="Force number of from imports (defaults to 2 when passed as CLI flag without value)"
+        "to be grid wrapped regardless of line "
+        "length. If 0 is passed in (the global default) only line length is considered.",
     )
     parser.add_argument(
         "--fss",
@@ -329,7 +353,8 @@ def _build_arg_parser() -> argparse.ArgumentParser:
     parser.add_argument(
         "--lss",
         "--length-sort-straight",
-        help="Sort straight imports by their string length.",
+        help="Sort straight imports by their string length. Similar to `length_sort` "
+        "but applies only to straight imports and doesn't affect from imports.",
         dest="length_sort_straight",
         action="store_true",
     )
@@ -619,6 +644,12 @@ def _build_arg_parser() -> argparse.ArgumentParser:
         help="See isort's determined config, as well as sources of config options.",
     )
     parser.add_argument(
+        "--show-files",
+        dest="show_files",
+        action="store_true",
+        help="See the files isort will be ran against with the current config options.",
+    )
+    parser.add_argument(
         "--honor-noqa",
         dest="honor_noqa",
         action="store_true",
@@ -645,7 +676,8 @@ def _build_arg_parser() -> argparse.ArgumentParser:
         dest="float_to_top",
         action="store_true",
         help="Causes all non-indented imports to float to the top of the file having its imports "
-        "sorted. It can be an excellent shortcut for collecting imports every once in a while "
+        "sorted (immediately below the top of file comment).\n"
+        "This can be an excellent shortcut for collecting imports every once in a while "
        "when you place them in the middle of a file to avoid context switching.\n\n"
        "*NOTE*: It currently doesn't work with cimports and introduces some extra over-head "
        "and a performance penalty.",
    )
@@ -727,6 +759,23 @@ def _build_arg_parser() -> argparse.ArgumentParser:
         help=argparse.SUPPRESS,
     )

+    parser.add_argument(
+        "--only-sections",
+        "--os",
+        dest="only_sections",
+        action="store_true",
+        help="Causes imports to be sorted only based on their sections like STDLIB,THIRDPARTY etc. "
+        "Imports are unaltered and keep their relative positions within the different sections.",
+    )
+
+    parser.add_argument(
+        "--only-modified",
+        "--om",
+        dest="only_modified",
+        action="store_true",
+        help="Suppresses verbose output for non-modified files.",
+    )
+
     return parser
@@ -775,6 +824,9 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
         return

     show_config: bool = arguments.pop("show_config", False)
+    show_files: bool = arguments.pop("show_files", False)
+    if show_config and show_files:
+        sys.exit("Error: either specify show-config or show-files not both.")

     if "settings_path" in arguments:
         if os.path.isfile(arguments["settings_path"]):
@@ -812,6 +864,8 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
     deprecated_flags = config_dict.pop("deprecated_flags", False)
     remapped_deprecated_args = config_dict.pop("remapped_deprecated_args", False)
     wrong_sorted_files = False
+    all_attempt_broken = False
+    no_valid_encodings = False

     if "src_paths" in config_dict:
         config_dict["src_paths"] = {
@@ -823,13 +877,27 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
         print(json.dumps(config.__dict__, indent=4, separators=(",", ": "), default=_preconvert))
         return
     elif file_names == ["-"]:
-        api.sort_stream(
-            input_stream=sys.stdin if stdin is None else stdin,
-            output_stream=sys.stdout,
-            config=config,
-        )
+        if show_files:
+            sys.exit("Error: can't show files for streaming input.")
+
+        if check:
+            incorrectly_sorted = not api.check_stream(
+                input_stream=sys.stdin if stdin is None else stdin,
+                config=config,
+                show_diff=show_diff,
+            )
+
+            wrong_sorted_files = incorrectly_sorted
+        else:
+            api.sort_stream(
+                input_stream=sys.stdin if stdin is None else stdin,
+                output_stream=sys.stdout,
+                config=config,
+                show_diff=show_diff,
+            )
     else:
         skipped: List[str] = []
+        broken: List[str] = []

         if config.filter_files:
             filtered_files = []
@@ -840,8 +908,14 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
                     filtered_files.append(file_name)
             file_names = filtered_files

-        file_names = iter_source_code(file_names, config, skipped)
+        file_names = iter_source_code(file_names, config, skipped, broken)
+        if show_files:
+            for file_name in file_names:
+                print(file_name)
+            return
         num_skipped = 0
+        num_broken = 0
+        num_invalid_encoding = 0
         if config.verbose:
             print(ASCII_ART)
@@ -873,6 +947,9 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
                 for file_name in file_names
             )

+        # If any files passed in are missing considered as error, should be removed
+        is_no_attempt = True
+        any_encoding_valid = False
         for sort_attempt in attempt_iterator:
             if not sort_attempt:
                 continue  # pragma: no cover - shouldn't happen, satisfies type constraint
@@ -884,6 +961,13 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
                     1  # pragma: no cover - shouldn't happen, due to skip in iter_source_code
                 )

+            if not sort_attempt.supported_encoding:
+                num_invalid_encoding += 1
+            else:
+                any_encoding_valid = True
+
+            is_no_attempt = False
+
         num_skipped += len(skipped)
         if num_skipped and not arguments.get("quiet", False):
             if config.verbose:
@@ -894,6 +978,18 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
                 )
             print(f"Skipped {num_skipped} files")

+        num_broken += len(broken)
+        if num_broken and not arguments.get("quite", False):
+            if config.verbose:
+                for was_broken in broken:
+                    warn(f"{was_broken} was broken path, make sure it exists correctly")
+            print(f"Broken {num_broken} paths")
+
+        if num_broken > 0 and is_no_attempt:
+            all_attempt_broken = True
+        if num_invalid_encoding > 0 and not any_encoding_valid:
+            no_valid_encodings = True
+
     if not config.quiet and (remapped_deprecated_args or deprecated_flags):
         if remapped_deprecated_args:
             warn(
@@ -913,6 +1009,14 @@ def main(argv: Optional[Sequence[str]] = None, stdin: Optional[TextIOWrapper] =
     if wrong_sorted_files:
         sys.exit(1)

+    if all_attempt_broken:
+        sys.exit(1)
+
+    if no_valid_encodings:
+        printer = create_terminal_printer(color=config.color_output)
+        printer.error("No valid encodings.")
+        sys.exit(1)
+

 if __name__ == "__main__":
     main()
diff --git a/isort/output.py b/isort/output.py
index 8cf915f9..d2633ffd 100644
--- a/isort/output.py
+++ b/isort/output.py
@@ -49,16 +49,19 @@ def sorted_imports(
     pending_lines_before = False
     for section in sections:
         straight_modules = parsed.imports[section]["straight"]
-        straight_modules = sorting.naturally(
-            straight_modules,
-            key=lambda key: sorting.module_key(
-                key, config, section_name=section, straight_import=True
-            ),
-        )
+        if not config.only_sections:
+            straight_modules = sorting.naturally(
+                straight_modules,
+                key=lambda key: sorting.module_key(
+                    key, config, section_name=section, straight_import=True
+                ),
+            )
+
         from_modules = parsed.imports[section]["from"]
-        from_modules = sorting.naturally(
-            from_modules, key=lambda key: sorting.module_key(key, config, section_name=section)
-        )
+        if not config.only_sections:
+            from_modules = sorting.naturally(
+                from_modules, key=lambda key: sorting.module_key(key, config, section_name=section)
+            )

         straight_imports = _with_straight_imports(
             parsed, config, straight_modules, section, remove_imports, import_type
@@ -89,7 +92,7 @@ def sorted_imports(
                         comments_above = []
                     else:
                         new_section_output.append(line)
-
+            # only_sections options is not imposed if force_sort_within_sections is True
             new_section_output = sorting.naturally(
                 new_section_output,
                 key=partial(
@@ -99,6 +102,7 @@ def sorted_imports(
                     lexicographical=config.lexicographical,
                     length_sort=config.length_sort,
                     reverse_relative=config.reverse_relative,
+                    group_by_package=config.group_by_package,
                 ),
             )
@@ -226,16 +230,18 @@ def _with_from_imports(
             config.force_single_line and module not in config.single_line_exclusions
         ):
             ignore_case = config.force_alphabetical_sort_within_sections
-            from_imports = sorting.naturally(
-                from_imports,
-                key=lambda key: sorting.module_key(
-                    key,
-                    config,
-                    True,
-                    ignore_case,
-                    section_name=section,
-                ),
-            )
+
+            if not config.only_sections:
+                from_imports = sorting.naturally(
+                    from_imports,
+                    key=lambda key: sorting.module_key(
+                        key,
+                        config,
+                        True,
+                        ignore_case,
+                        section_name=section,
+                    ),
+                )
             if remove_imports:
                 from_imports = [
                     line for line in from_imports if f"{module}.{line}" not in remove_imports
@@ -252,7 +258,8 @@ def _with_from_imports(
         if config.combine_as_imports and not ("*" in from_imports and config.combine_star):
             if not config.no_inline_sort:
                 for as_import in as_imports:
-                    as_imports[as_import] = sorting.naturally(as_imports[as_import])
+                    if not config.only_sections:
+                        as_imports[as_import] = sorting.naturally(as_imports[as_import])
             for from_import in copy.copy(from_imports):
                 if from_import in as_imports:
                     idx = from_imports.index(from_import)
@@ -312,22 +319,41 @@ def _with_from_imports(
                     from_comments = parsed.categorized_comments["straight"].get(
                         f"{module}.{from_import}"
                     )
-                    output.extend(
-                        with_comments(
-                            from_comments,
-                            wrap.line(import_start + as_import, parsed.line_separator, config),
-                            removed=config.ignore_comments,
-                            comment_prefix=config.comment_prefix,
+
+                    if not config.only_sections:
+                        output.extend(
+                            with_comments(
+                                from_comments,
+                                wrap.line(
+                                    import_start + as_import, parsed.line_separator, config
+                                ),
+                                removed=config.ignore_comments,
+                                comment_prefix=config.comment_prefix,
+                            )
+                            for as_import in sorting.naturally(as_imports[from_import])
+                        )
+
+                    else:
+                        output.extend(
+                            with_comments(
+                                from_comments,
+                                wrap.line(
+                                    import_start + as_import, parsed.line_separator, config
+                                ),
+                                removed=config.ignore_comments,
+                                comment_prefix=config.comment_prefix,
+                            )
+                            for as_import in as_imports[from_import]
                         )
-                        for as_import in sorting.naturally(as_imports[from_import])
-                    )
                 else:
                     output.append(wrap.line(single_import_line, parsed.line_separator, config))
                 comments = None
         else:
             while from_imports and from_imports[0] in as_imports:
                 from_import = from_imports.pop(0)
-                as_imports[from_import] = sorting.naturally(as_imports[from_import])
+
+                if not config.only_sections:
+                    as_imports[from_import] = sorting.naturally(as_imports[from_import])
                 from_comments = (
                     parsed.categorized_comments["straight"].get(f"{module}.{from_import}") or []
                 )
diff --git a/isort/parse.py b/isort/parse.py
index 613f7fa7..9a80e97b 100644
--- a/isort/parse.py
+++ b/isort/parse.py
@@ -138,6 +138,7 @@ class ParsedContent(NamedTuple):
     original_line_count: int
     line_separator: str
     sections: Any
+    verbose_output: List[str]


 def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedContent:
@@ -163,6 +164,8 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
         "from": defaultdict(list),
     }
     imports: OrderedDict[str, Dict[str, Any]] = OrderedDict()
+    verbose_output: List[str] = []
+
     for section in chain(config.sections, config.forced_separate):
         imports[section] = {"straight": OrderedDict(), "from": OrderedDict()}
     categorized_comments: CommentsDict = {
@@ -200,12 +203,18 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
         if skipping_line:
             out_lines.append(line)
             continue
-        elif (
+
+        lstripped_line = line.lstrip()
+        if (
             config.float_to_top
             and import_index == -1
             and line
             and not in_quote
-            and not line.strip().startswith("#")
+            and not lstripped_line.startswith("#")
+            and not lstripped_line.startswith("'''")
+            and not lstripped_line.startswith('"""')
+            and not lstripped_line.startswith("import")
+            and not lstripped_line.startswith("from")
         ):
             import_index = index - 1
             while import_index and not in_lines[import_index - 1]:
@@ -327,8 +336,10 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
                 item.replace("{|", "{ ").replace("|}", " }")
                 for item in _strip_syntax(import_string).split()
             ]
-            straight_import = True
+            attach_comments_to: Optional[List[Any]] = None
+            direct_imports = just_imports[1:]
+            straight_import = True
             if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports):
                 straight_import = False
                 while "as" in just_imports:
@@ -339,6 +350,9 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
                     top_level_module = just_imports[0]
                    module = top_level_module + "." + nested_module
                    as_name = just_imports[as_index + 1]
+                    direct_imports.remove(nested_module)
+                    direct_imports.remove(as_name)
+                    direct_imports.remove("as")
                    if nested_module == as_name and config.remove_redundant_aliases:
                        pass
                    elif as_name not in as_map["from"][module]:
@@ -374,8 +388,13 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
         if type_of_import == "from":
             import_from = just_imports.pop(0)
             placed_module = finder(import_from)
-            if config.verbose:
+            if config.verbose and not config.only_modified:
                 print(f"from-type place_module for {import_from} returned {placed_module}")
+
+            elif config.verbose:
+                verbose_output.append(
+                    f"from-type place_module for {import_from} returned {placed_module}"
+                )
             if placed_module == "":
                 warn(
                     f"could not place module {import_from} of line {line} --"
@@ -419,11 +438,11 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte

             if import_from not in root:
                 root[import_from] = OrderedDict(
-                    (module, straight_import) for module in just_imports
+                    (module, module in direct_imports) for module in just_imports
                 )
             else:
                 root[import_from].update(
-                    (module, straight_import | root[import_from].get(module, False))
+                    (module, root[import_from].get(module, False) or module in direct_imports)
                     for module in just_imports
                 )
@@ -463,8 +482,13 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
                         categorized_comments["above"]["straight"].get(module, [])
                     )
                 placed_module = finder(module)
-                if config.verbose:
+                if config.verbose and not config.only_modified:
                     print(f"else-type place_module for {module} returned {placed_module}")
+
+                elif config.verbose:
+                    verbose_output.append(
+                        f"else-type place_module for {module} returned {placed_module}"
+                    )
                 if placed_module == "":
                     warn(
                         f"could not place module {module} of line {line} --"
@@ -491,4 +515,5 @@ def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedConte
         original_line_count=original_line_count,
         line_separator=line_separator,
         sections=config.sections,
+        verbose_output=verbose_output,
     )
diff --git a/isort/place.py b/isort/place.py
index 8e3e880f..fcb3dcbd 100644
--- a/isort/place.py
+++ b/isort/place.py
@@ -54,7 +54,7 @@ def _known_pattern(name: str, config: Config) -> Optional[Tuple[str, str]]:
     module_names_to_check = (".".join(parts[:first_k]) for first_k in range(len(parts), 0, -1))
     for module_name_to_check in module_names_to_check:
         for pattern, placement in config.known_patterns:
-            if pattern.match(module_name_to_check):
+            if placement in config.sections and pattern.match(module_name_to_check):
                 return (placement, f"Matched configured known pattern {pattern}")

     return None
diff --git a/isort/profiles.py b/isort/profiles.py
index cd976cd2..cb8cb568 100644
--- a/isort/profiles.py
+++ b/isort/profiles.py
@@ -15,12 +15,18 @@ django = {
     "multi_line_output": 5,
     "line_length": 79,
 }
-pycharm = {"multi_line_output": 3, "force_grid_wrap": 2}
+pycharm = {
+    "multi_line_output": 3,
+    "force_grid_wrap": 2,
+    "lines_after_imports": 2,
+}
 google = {
     "force_single_line": True,
     "force_sort_within_sections": True,
     "lexicographical": True,
     "single_line_exclusions": ("typing",),
+    "order_by_type": False,
+    "group_by_package": True,
 }
 open_stack = {
     "force_single_line": True,
diff --git a/isort/pylama_isort.py b/isort/pylama_isort.py
index 0e14d569..4d4ffb92 100644
--- a/isort/pylama_isort.py
+++ b/isort/pylama_isort.py
@@ -5,6 +5,8 @@ from typing import Any, Dict, List

 from pylama.lint import Linter as BaseLinter

+from isort.exceptions import FileSkipped
+
 from . import api
@@ -25,9 +27,17 @@ class Linter(BaseLinter):
     def run(self, path: str, **meta: Any) -> List[Dict[str, Any]]:
         """Lint the file. Return an array of error dicts if appropriate."""
         with supress_stdout():
-            if not api.check_file(path):
-                return [
-                    {"lnum": 0, "col": 0, "text": "Incorrectly sorted imports.", "type": "ISORT"}
-                ]
-            else:
-                return []
+            try:
+                if not api.check_file(path, disregard_skip=False):
+                    return [
+                        {
+                            "lnum": 0,
+                            "col": 0,
+                            "text": "Incorrectly sorted imports.",
+                            "type": "ISORT",
+                        }
+                    ]
+            except FileSkipped:
+                pass
+
+        return []
diff --git a/isort/settings.py b/isort/settings.py
index 1e10ab60..a89f6d69 100644
--- a/isort/settings.py
+++ b/isort/settings.py
@@ -18,7 +18,12 @@ from warnings import warn
 from . import stdlibs
 from ._future import dataclass, field
 from ._vendored import toml
-from .exceptions import FormattingPluginDoesNotExist, InvalidSettingsPath, ProfileDoesNotExist
+from .exceptions import (
+    FormattingPluginDoesNotExist,
+    InvalidSettingsPath,
+    ProfileDoesNotExist,
+    UnsupportedSettings,
+)
 from .profiles import profiles
 from .sections import DEFAULT as SECTION_DEFAULTS
 from .sections import FIRSTPARTY, FUTURE, LOCALFOLDER, STDLIB, THIRDPARTY
@@ -26,7 +31,7 @@ from .wrap_modes import WrapModes
 from .wrap_modes import from_string as wrap_mode_from_string

 _SHEBANG_RE = re.compile(br"^#!.*\bpython[23w]?\b")
-SUPPORTED_EXTENSIONS = frozenset({"py", "pyi", "pyx"})
+SUPPORTED_EXTENSIONS = frozenset({"py", "pyi", "pyx", "pxd"})
 BLOCKED_EXTENSIONS = frozenset({"pex"})
 FILE_SKIP_COMMENTS: Tuple[str, ...] = (
     "isort:" + "skip_file",
@@ -54,11 +59,14 @@ DEFAULT_SKIP: FrozenSet[str] = frozenset(
         ".hg",
         ".mypy_cache",
         ".nox",
+        ".svn",
+        ".bzr",
         "_build",
         "buck-out",
         "build",
         "dist",
         ".pants.d",
+        ".direnv",
         "node_modules",
     }
 )
@@ -161,6 +169,7 @@ class _Config:
     force_grid_wrap: int = 0
     force_sort_within_sections: bool = False
     lexicographical: bool = False
+    group_by_package: bool = False
     ignore_whitespace: bool = False
     no_lines_before: FrozenSet[str] = frozenset()
     no_inline_sort: bool = False
@@ -189,6 +198,8 @@ class _Config:
     classes: FrozenSet[str] = frozenset()
     variables: FrozenSet[str] = frozenset()
     dedup_headings: bool = False
+    only_sections: bool = False
+    only_modified: bool = False

     def __post_init__(self):
         py_version = self.py_version
@@ -255,6 +266,11 @@ class Config(_Config):
             super().__init__(**config_vars)  # type: ignore
             return

+        # We can't use self.quiet to conditionally show warnings before super.__init__() is called
+        # at the end of this method. _Config is also frozen so setting self.quiet isn't possible.
+        # Therefore we extract quiet early here in a variable and use that in warning conditions.
+        quiet = config_overrides.get("quiet", False)
+
         sources: List[Dict[str, Any]] = [_DEFAULT_SETTINGS]

         config_settings: Dict[str, Any]
@@ -265,6 +281,14 @@ class Config(_Config):
                 CONFIG_SECTIONS.get(os.path.basename(settings_file), FALLBACK_CONFIG_SECTIONS),
             )
             project_root = os.path.dirname(settings_file)
+            if not config_settings and not quiet:
+                warn(
+                    f"A custom settings file was specified: {settings_file} but no configuration "
+                    "was found inside. This can happen when [settings] is used as the config "
+                    "header instead of [isort]. "
+                    "See: https://pycqa.github.io/isort/docs/configuration/config_files"
+                    "/#custom_config_files for more information."
+                )
         elif settings_path:
             if not os.path.exists(settings_path):
                 raise InvalidSettingsPath(settings_path)
@@ -324,7 +348,7 @@ class Config(_Config):
                 combined_config.pop(key)
                 if maps_to_section in KNOWN_SECTION_MAPPING:
                     section_name = f"known_{KNOWN_SECTION_MAPPING[maps_to_section].lower()}"
-                    if section_name in combined_config and not self.quiet:
+                    if section_name in combined_config and not quiet:
                         warn(
                             f"Can't set both {key} and {section_name} in the same config file.\n"
                             f"Default to {section_name} if unsure."
@@ -336,10 +360,7 @@ class Config(_Config):
                         combined_config[section_name] = frozenset(value)
                 else:
                     known_other[import_heading] = frozenset(value)
-                    if (
-                        maps_to_section not in combined_config.get("sections", ())
-                        and not self.quiet
-                    ):
+                    if maps_to_section not in combined_config.get("sections", ()) and not quiet:
                         warn(
                             f"`{key}` setting is defined, but {maps_to_section} is not"
                             " included in `sections` config option:"
@@ -406,7 +427,7 @@ class Config(_Config):
         if deprecated_options_used:
             for deprecated_option in deprecated_options_used:
                 combined_config.pop(deprecated_option)
-            if not self.quiet:
+            if not quiet:
                 warn(
                     "W0503: Deprecated config options were used: "
                     f"{', '.join(deprecated_options_used)}."
@@ -420,6 +441,19 @@ class Config(_Config):
                 combined_config.pop(f"{IMPORT_HEADING_PREFIX}{import_heading_key}")
             combined_config["import_headings"] = import_headings

+        unsupported_config_errors = {}
+        for option in set(combined_config.keys()).difference(
+            getattr(_Config, "__dataclass_fields__", {}).keys()
+        ):
+            for source in reversed(sources):
+                if option in source:
+                    unsupported_config_errors[option] = {
+                        "value": source[option],
+                        "source": source["source"],
+                    }
+        if unsupported_config_errors:
+            raise UnsupportedSettings(unsupported_config_errors)
+
         super().__init__(sources=tuple(sources), **combined_config)  # type: ignore

     def is_supported_filetype(self, file_name: str):
diff --git a/isort/setuptools_commands.py b/isort/setuptools_commands.py
index f6700887..96e41dd0 100644
--- a/isort/setuptools_commands.py
+++ b/isort/setuptools_commands.py
@@ -31,13 +31,13 @@ class ISortCommand(setuptools.Command):
     def distribution_files(self) -> Iterator[str]:
         """Find distribution packages."""
         # This is verbatim from flake8
-        if self.distribution.packages:
+        if self.distribution.packages:  # pragma: no cover
             package_dirs = self.distribution.package_dir or {}
             for package in self.distribution.packages:
                 pkg_dir = package
                 if package in package_dirs:
                     pkg_dir = package_dirs[package]
-                elif "" in package_dirs:
+                elif "" in package_dirs:  # pragma: no cover
                     pkg_dir = package_dirs[""] + os.path.sep + pkg_dir
                 yield pkg_dir.replace(".", os.path.sep)
diff --git a/isort/sorting.py b/isort/sorting.py
index 780747a3..cab77011 100644
--- a/isort/sorting.py
+++ b/isort/sorting.py
@@ -58,13 +58,16 @@ def section_key(
     lexicographical: bool = False,
     length_sort: bool = False,
     reverse_relative: bool = False,
+    group_by_package: bool = False,
 ) -> str:
     section = "B"

     if reverse_relative and line.startswith("from ."):
         match = re.match(r"^from (\.+)\s*(.*)", line)
-        if match:
+        if match:  # pragma: no cover - regex always matches if line starts with "from ."
             line = f"from {' '.join(match.groups())}"
+    if group_by_package and line.strip().startswith("from"):
+        line = line.split(" import", 1)[0]

     if lexicographical:
         line = _import_line_intro_re.sub("", _import_line_midline_import_re.sub(".", line))
diff --git a/isort/utils.py b/isort/utils.py
index 27f17b4a..63b51990 100644
--- a/isort/utils.py
+++ b/isort/utils.py
@@ -1,7 +1,5 @@
 import os
 import sys
-from contextlib import contextmanager
-from typing import Iterator


 def exists_case_sensitive(path: str) -> bool:
@@ -16,14 +14,3 @@ def exists_case_sensitive(path: str) -> bool:
         directory, basename = os.path.split(path)
         result = basename in os.listdir(directory)
     return result
-
-
-@contextmanager
-def chdir(path: str) -> Iterator[None]:
-    """Context manager for changing dir and restoring previous workdir after exit."""
-    curdir = os.getcwd()
-    os.chdir(path)
-    try:
-        yield
-    finally:
-        os.chdir(curdir)
diff --git a/isort/wrap.py b/isort/wrap.py
index 872b096e..11542fa0 100644
--- a/isort/wrap.py
+++ b/isort/wrap.py
@@ -75,11 +75,13 @@ def line(content: str, line_separator: str, config: Config = DEFAULT_CONFIG) ->
             splitter
         ):
             line_parts = re.split(exp, line_without_comment)
-            if comment:
+            if comment and not (config.use_parentheses and "noqa" in comment):
                 _comma_maybe = (
                     "," if (config.include_trailing_comma and config.use_parentheses) else ""
                 )
-                line_parts[-1] = f"{line_parts[-1].strip()}{_comma_maybe} #{comment}"
+                line_parts[
+                    -1
+                ] = f"{line_parts[-1].strip()}{_comma_maybe}{config.comment_prefix}{comment}"
             next_line = []
             while (len(content) + 2) > (
                 config.wrap_length or config.line_length
@@ -104,8 +106,14 @@ def line(content: str, line_separator: str, config: Config = DEFAULT_CONFIG) ->
                         _separator = line_separator
                     else:
                         _separator = ""
+                    _comment = ""
+                    if comment and "noqa" in comment:
+                        _comment = f"{config.comment_prefix}{comment}"
+                        cont_line = cont_line.rstrip()
+                    _comma = "," if config.include_trailing_comma else ""
                     output = (
-                        f"{content}{splitter}({line_separator}{cont_line}{_comma}{_separator})"
+                        f"{content}{splitter}({_comment}"
+                        f"{line_separator}{cont_line}{_comma}{_separator})"
                     )
                     lines = output.split(line_separator)
                     if config.comment_prefix in lines[-1] and lines[-1].endswith(")"):
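Among the changes above are the new `only_sections` and `only_modified` config options, exposed on the CLI as `--only-sections`/`--os` and `--only-modified`/`--om`. Below is a minimal sketch of exercising `only_sections` through the public Python API; it assumes an isort release that already ships these options, and the sample module names are purely illustrative:

```python
import isort

# Unsorted sample input: imports from different sections, deliberately out of order.
messy = "import django\nimport os\nfrom mypackage import util\nimport sys\n"

# With only_sections=True, imports are grouped into their sections (stdlib,
# third party, first party, ...) but keep their relative order within each section.
print(isort.code(messy, only_sections=True))
```

Per the new CLI help text, `--only-modified` simply suppresses verbose output for non-modified files.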
